seq_id (string, lengths 4–11) | text (string, lengths 113–2.92M) | repo_name (string, lengths 4–125, ⌀) | sub_path (string, lengths 3–214) | file_name (string, lengths 3–160) | file_ext (string, 18 classes) | file_size_in_byte (int64, 113–2.92M) | program_lang (string, 1 class) | lang (string, 93 classes) | doc_type (string, 1 class) | stars (int64, 0–179k, ⌀) | dataset (string, 3 classes) | pt (string, 78 classes)
---|---|---|---|---|---|---|---|---|---|---|---|---
26924932220
|
import os, sys
import json
import xml.etree.ElementTree as ET
labels_trad = {
'bras levés' : 'arms_raised',
'combat' : 'fighting',
'action' : 'action',
'aucune' : 'none',
'se lever' : 'stand_up',
'se baisser' : 'bend_down',
's\'assoir' : 'sit_down',
'se coucher' : 'lie_down',
'chuter' : 'falling',
'déplacement' : 'movement',
'stationnaire' : 'stationary',
'lent' : 'slow',
'normal' : 'normal',
'rapide' : 'fast',
'posture' : 'posture',
'debout' : 'standing',
'assis sur un objet' : 'sitting_on_object',
'assis au sol' : 'sitting_on_ground',
'allongé' : 'lying_down',
'à genoux' : 'kneeling',
'penché' : 'bending',
'anormal' : 'unnatural',
'autre' : 'other',
'accroupi' : 'squatting',
'gestes' : 'gesture',
'aucuns' : 'none',
'false' : 'false',
'true' : 'true'
}
def add_track_to_coco_annotations(track, annotations, area):
k = 0
n_track = len(annotations)
for pose in track["poses"]:
if pose["fully_occluded"] == "0":
annotation = {}
annotation["keypoints"] = pose["keypoints"]
annotation["image_id"] = pose["image_id"]
annotation["id"] = n_track + k
annotation['iscrowd'] = 0
annotation['area'] = area
annotations.append(annotation)
k+=1
def add_track_to_action_annotations(track, annotations):
k = 0
n_track = len(annotations)
track_annotations = {}
track_annotations["poses"] = []
for pose in track["poses"]:
annotation = pose
del annotation['fully_occluded']
annotation["category_id"] = 1
annotation["id"] = n_track + k
annotation["num_keypoints"] = 14
track_annotations["poses"].append(annotation)
k+=1
annotations.append(track_annotations)
def add_xml_to_data(images, coco_annotations, action_annotations, xml_path):
    '''
    Add data from a CVAT XML file to the image and annotation lists (empty at first).
    :param images list : target list of image entries
    :param coco_annotations list : target list of COCO keypoint annotations
    :param action_annotations list : target list of per-track action annotations
    :param xml_path str : input data xml path
    '''
tree = ET.parse(xml_path)
root = tree.getroot()
n_images = len(images)
k=0
video_name = root.find("meta").find("task").find("name").text
width, height = root.find("meta").find("task").find("original_size").find("width").text, root.find("meta").find("task").find("original_size").find("height").text
for child in root:
if child.tag == "track":
track_info = {}
track_info["width"], track_info["height"] = width, height
track_info["poses"] = []
for points in child:
frame = int(points.attrib["frame"])
image_id = n_images + frame
if frame > k - 1 :
frame_id = str(frame).zfill(6)
image = {}
image["file_name"] = video_name.split(".")[0] + "_frame_" + frame_id + ".png"
image["width"] = width
image["height"] = height
image["id"] = image_id
images.append(image)
k+=1
pose = {}
pose["keypoints"] = convert_points_format(points.attrib["points"])
pose["image_id"] = image_id
pose["labels"] = {}
for pose_attrib in points:
pose["labels"][labels_trad[pose_attrib.attrib["name"]]] = labels_trad[pose_attrib.text]
pose["fully_occluded"] = points.attrib["occluded"]
track_info["poses"].append(pose)
add_track_to_coco_annotations(track_info, coco_annotations, int(width) * int(height) )
add_track_to_action_annotations(track_info,action_annotations )
def convert_points_format(points_str):
new_str = points_str.replace(";", ",").split(",")
str_keypoints = new_str[0:28]
str_mask = new_str[28:42]
keypoints = []
for k in range(0,len(str_keypoints), 2):
if k < 28:
keypoints.append(float(str_keypoints[k]))
keypoints.append(float(str_keypoints[k+1]))
keypoints.append(2) # visibility
for k in range(len(str_mask)):
mask_value = int(float(str_mask[k]))
if mask_value == 2:
keypoints[3*k + 2] = 1.1
elif mask_value == 1:
keypoints[3*k + 2] = 1.2
return keypoints
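# A quick sketch of the encoding above (hypothetical values, only two of the
# 14 keypoints shown): a CVAT points string "10.0,20.0;30.0,40.0;..." followed
# by 14 mask flags becomes [x, y, v] triples, where the visibility v is 2
# (visible) by default, 1.1 for mask flag 2, and 1.2 for mask flag 1:
# "10.0,20.0;30.0,40.0;...;1;0;..." -> [10.0, 20.0, 1.2, 30.0, 40.0, 2, ...]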
def to_json_coco(images, annotations_coco):
coco_dic = {}
coco_dic["images"] = images
coco_dic["annotations"] = annotations_coco
keypoints_categorie = {}
keypoints_categorie["supercategory"] = "person"
keypoints_categorie["id"] = 1
keypoints_categorie["name"] = "person"
keypoints_categorie["keypoints"] = ["head", "neck", "left_shoulder", "right_shoulder", "left_elbow", "right_elbow", "left_wrist", "right_wrist", "left_hip", "right_hip", "left_knee", "right_knee", "left_ankle", "right_ankle"]
keypoints_categorie["skeleton"] = [[0,1],[1,2],[1,3],[2,4],[3,5],[4,6],[5,7],[1,8],[1,9],[8,10],[9,11], [10,12],[11,13]]
coco_dic["categories"] = [keypoints_categorie]
print("{} images, {} annotations".format(len(coco_dic["images"]), len(coco_dic["annotations"])))
return coco_dic
if __name__ == '__main__':
images, coco_annotations, action_annotations = [], [], []
add_xml_to_data(images, coco_annotations, action_annotations, sys.argv[1])
add_xml_to_data(images, coco_annotations, action_annotations, sys.argv[2])
add_xml_to_data(images, coco_annotations, action_annotations, sys.argv[3])
add_xml_to_data(images, coco_annotations, action_annotations, sys.argv[4])
print(len(coco_annotations))
json_tasks_coco = to_json_coco(images, coco_annotations)
with open(sys.argv[5], "w") as outfile:
json.dump(json_tasks_coco, outfile, indent=None)
json_tasks_actions = to_json_coco(images, action_annotations)
with open(sys.argv[6], "w") as outfile:
json.dump(json_tasks_actions, outfile, indent=None)
|
anessabiri/test_version_nogit_15
|
transforms/cvat_to_coco.py
|
cvat_to_coco.py
|
py
| 6,034 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28078196972
|
# -*- coding: utf-8 -*-
"""
@Author 坦克手贝塔
@Date 2023/5/17 10:01
"""
from typing import List
"""
输入一个链表的头节点,从尾到头反过来返回每个节点的值(用数组返回)。
示例 1:
输入:head = [1,3,2]
输出:[2,3,1]
"""
"""
思路:用列表把所有的值都存起来,再反序即可
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
@staticmethod
def reversePrint(head: ListNode) -> List[int]:
nums = []
while head:
nums.append(head.val)
head = head.next
return nums[::-1]
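# A minimal usage sketch: build the list 1 -> 3 -> 2 and print its values
# from tail to head; expected output: [2, 3, 1].
if __name__ == "__main__":
    head = ListNode(1)
    head.next = ListNode(3)
    head.next.next = ListNode(2)
    print(Solution.reversePrint(head))  # [2, 3, 1]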
|
TankManBeta/LeetCode-Python
|
剑指Offer_06_easy.py
|
剑指Offer_06_easy.py
|
py
| 691 |
python
|
en
|
code
| 0 |
github-code
|
50
|
13420508024
|
from django.shortcuts import render, HttpResponse, redirect
from .forms import MovimentacaoForm, LoginForm, PoupancaForm
from .models import Movimentacao, Carteira, Poupanca
from django.contrib import messages
from django.contrib.auth import login, logout
from usuario.models import Usuario
# Create your views here.
def saldo(request):
if request.user.is_authenticated:
saldo_usuario = Carteira.objects.filter(usuario_id=request.user.id).first()
print(saldo_usuario)
lista_movi = Movimentacao.objects.filter(usuario_id=request.user.id).order_by('id')
entradas = lista_movi.filter(tipo_movimentacao="Entrada")
despesas = lista_movi.filter(tipo_movimentacao="Despesa")
        if lista_movi.count() > 0:
            entradas_usuario = sum([e.valor or 0 for e in entradas])
            despesas_usuario = sum([d.valor or 0 for d in despesas])
            saldo_usuario.saldo = entradas_usuario - despesas_usuario
            saldo_usuario.save()
            return saldo_usuario.saldo
        else:
            # no movements yet, so the balance is zero
            return 0
    else:
        # anonymous users have no balance to compute
        return None
def index(request):
if request.user.is_anonymous:
return render(request, 'login.html')
else:
movimentacao = Movimentacao.objects.all()
carteira = Carteira.objects.filter(usuario=request.user).first()
saldo_do_usuario = saldo(request)
context = { 'movimentacao' : movimentacao, 'carteira' : carteira , "saldo_do_usuario" : saldo_do_usuario }
return render(request, 'index.html', context)
def index_submit(request):
if request.method == 'POST':
print(request.POST)
form = MovimentacaoForm(request.POST, request.FILES)
print(form.is_valid())
if form.is_valid():
try:
mov = Movimentacao.objects.create(
valor = form.cleaned_data['valor'],
usuario_id = form.cleaned_data['usuario'],
carteira_id = form.cleaned_data['carteira'],
descricao = form.cleaned_data['descricao'],
data = form.cleaned_data['data'],
tipo_movimentacao = form.cleaned_data['tipo_movimentacao'],
)
mov.save()
messages.success(request, 'Movimentação adicionada com sucesso')
return redirect('index')
except Exception as e:
print(e)
# print(form.cleaned_data['tipo_movimentacao'])
messages.error(request,form.errors)
return redirect('/')
else:
messages.error(request, form.errors)
# print(form.errors)
return redirect('/')
def login_user(request):
if request.user.is_authenticated:
return redirect('/')
return render(request, 'login.html')
def login_submit(request):
if request.method == 'POST':
if request.POST:
form = LoginForm(request.POST)
if form.is_valid():
user = Usuario.objects.filter(email=form.cleaned_data['username']).first()
print(user)
print(Usuario.objects.filter(email=form.cleaned_data['username']))
if user:
if user.check_password(form.cleaned_data['password']):
                        messages.success(request, 'Login realizado com sucesso')
login(request, user)
return redirect('pagina_inicial')
else:
messages.error(request, 'Usuário ou senha inválido')
else:
messages.error(request, 'Usuário ou senha inválido')
    messages.error(request, 'Erro ao logar')
return render(request, 'login.html')
def logout_user(request):
if request.user.is_authenticated:
logout(request)
        #messages.success(request, 'Logout realizado com sucesso')
return redirect('login')
#depois modificar para direcionar para página inicial, quando tiver uma
def extrato(request):
if request.user.is_authenticated:
lista_mov = Movimentacao.objects.filter(usuario=request.user).order_by('-data')
saldo_do_usuario = saldo(request)
context = {'lista_mov' : lista_mov, 'saldo_do_usuario' : saldo_do_usuario}
return render(request, 'extrato.html', context)
else:
return redirect('login')
def pagina_inicial(request):
if request.user.is_authenticated:
saldo_do_usuario = saldo(request)
context = {"saldo_do_usuario" : saldo_do_usuario }
return render(request,'paginainicial.html', context)
else:
return redirect('login')
def poupanca(request):
if request.user.is_authenticated:
lista_poupanca = Poupanca.objects.order_by('-id')
context = {'lista_poupanca' : lista_poupanca}
return render(request,'poupanca.html', context)
else:
return redirect('login')
def nova_poupanca(request):
if request.user.is_authenticated:
lista_poupanca = Poupanca.objects.order_by('-id')
context = {'lista_poupanca' : lista_poupanca}
return render(request, "novapoupanca.html", context)
else:
return redirect('login')
def nova_poupanca_submit(request):
if request.user.is_authenticated:
if request.method == 'POST':
form = PoupancaForm(request.POST)
print(form.is_valid())
if form.is_valid():
try:
poup = Poupanca.objects.create(
nome_poupanca = form.cleaned_data['nome_poupanca'],
saldo_poupanca = form.cleaned_data['saldo_poupanca'],
)
poup.save()
messages.success(request, "Poupança criada com sucesso")
lista_poupanca = Poupanca.objects.order_by('-id')
context = {'lista_poupanca' : lista_poupanca}
return render(request,'novapoupanca.html', context)
            except Exception:
                messages.error(request, 'Erro ao criar objeto')
                return render(request, 'novapoupanca.html')
else:
messages.error(request, form.errors)
return render(request,'novapoupanca.html')
else:
            # GET request: render the form without the undefined 'poup' object
            return render(request, 'novapoupanca.html')
else:
return redirect('login')
|
annydomingos/Supple
|
financeiro/views.py
|
views.py
|
py
| 6,435 |
python
|
pt
|
code
| 1 |
github-code
|
50
|
16486696564
|
import os
raw_directory = '/home/julius/ScienceBowl/ScienceBowlFormat/raw'
new_directory = '/home/julius/ScienceBowl/ScienceBowlFormat/new'
def get_new_filename(short_filename):
short_to_long = {
'astr': 'astronomy',
'genr': 'general_science',
'biol': 'biology',
'chem': 'chemistry',
'phys': 'physics',
'ersc': 'earth_and_space_science',
}
if short_filename in short_to_long.keys():
return short_to_long[short_filename]
else:
return 'unknown'
def main():
cwd = os.getcwd()
for raw_file in os.listdir(raw_directory):
os.chdir(raw_directory)
correct_choices = [
'a)',
'b)',
'c)',
'd)',
]
wrong_choices = {
'w)': 'a.',
'x)': 'b.',
'y)': 'c.',
'z)': 'd.',
}
wrong_answers = {
'W': 'A',
'X': 'B',
'Y': 'C',
'Z': 'D',
}
raw_filename = os.fsdecode(raw_file)
if raw_filename.endswith(".txt"):
short_filename = raw_filename[3:7]
new_filename = get_new_filename(short_filename) + '.rtf'
new_lines = [
'Science Bowl ' + get_new_filename(short_filename).capitalize() + '\n',
'\n',
'Multiple Choice\n',
'\n'
]
counter = 1
with open(file=raw_filename, mode='r', errors='ignore') as raw:
between = False
is_not_mc = False
for raw_line in raw: # select relevant lines
mid_line = raw_line
if mid_line.startswith(' '):
mid_line = mid_line[1:]
if mid_line == '\n':
continue
new_line = mid_line
if mid_line[:2] != 'z)' and mid_line[:2] != 'd)' and new_lines[-1][:2] == 'c.':
new_lines.append('d. none of the above\n')
if mid_line[:2] in correct_choices and not is_not_mc:
new_line = mid_line[:1] + '.' + mid_line[2:]
if new_line[:2] == 'a.':
new_lines[-1] = new_lines[-1] + '\n'
if new_line[2] != ' ':
new_line = new_line[:2] + ' ' + new_line[2:]
is_not_mc = False
between = False
elif mid_line[:2] in wrong_choices.keys() and not is_not_mc:
new_line = wrong_choices[mid_line[:2]] + mid_line[2:]
if new_line[:2] == 'a.':
new_lines[-1] = new_lines[-1] + '\n'
if new_line[2] != ' ':
new_line = new_line[:2] + ' ' + new_line[2:]
is_not_mc = False
between = False
elif mid_line.startswith('ANSWER:'):
if is_not_mc:
new_line = ''
else:
if mid_line[8] in wrong_answers.keys():
new_line = 'ANS: ' + wrong_answers[mid_line[8]] + '\n'
else:
new_line = 'ANS:' + mid_line[7:9] + '\n'
new_line = new_line + 'TOP: ' + short_filename.upper() + '\n'
is_not_mc = False
between = False
elif 'Short Answer:' in mid_line or 'True-False:' in mid_line:
new_line = ''
is_not_mc = True
elif mid_line.startswith(short_filename.upper()) and 'Short Answer:' not in mid_line and \
'True-False:' not in mid_line and 'Multiple Choice:' not in mid_line:
new_line = ''
is_not_mc = True
elif mid_line.startswith(short_filename.upper()):
new_line = str(counter) + '. ' + mid_line[26:-2]
counter += 1
between = True
elif is_not_mc:
new_line = ''
elif between:
new_line = mid_line[:-2]
else:
new_line = ''
                    if new_line != '':  # '' marks lines that were dropped above
                        new_lines.append(new_line)
os.chdir(new_directory)
if counter <= 250:
with open(new_filename, 'w+') as new:
new.writelines(new_lines)
else:
top_lines = new_lines[:4]
questions = new_lines[4:]
count = 1
for num in range(len(questions)):
if questions[num].startswith(str(count) + '. '):
dot = questions[num].find('.')
number = questions[num][:dot]
remainder = int(number) % 250
if remainder == 0:
remainder += 250
questions[num] = str(remainder) + questions[num][dot:]
count += 1
cutoffs = []
limit = 1
for num in range(len(questions)):
if questions[num].startswith('1. '):
cutoffs.append(num)
limit += 1
cutoffs.append(len(questions) - 1)
index = 0
chunks = counter // 250 + 1
for chunk in range(chunks):
chunk_filename = new_filename[:-4] + str(chunk+1) + '.rtf'
start = cutoffs[index]
end = cutoffs[index+1]
with open(chunk_filename, 'w+') as new:
new.writelines(top_lines)
new.writelines(questions[start:end])
index += 1
os.chdir(cwd)
if __name__ == '__main__':
main()
|
juliustao/ScienceBowlFormat
|
format.py
|
format.py
|
py
| 6,198 |
python
|
en
|
code
| 1 |
github-code
|
50
|
72917307674
|
class Node:
def __init__(self, data,seatno,present):
self.data = data
self.seatno=seatno
self.next = None
self.prev = None
self.present=present
class DoublyLinkedList:
def __init__(self):
self.head = None
def append(self, new_data,seatno,present):
new_node = Node(new_data,seatno,present)
new_node.next = None
if self.head is None:
new_node.prev = None
self.head = new_node
return
last = self.head
while(last.next is not None):
last = last.next
last.next = new_node
new_node.prev = last
return
def printList(self, node):
print ("\nTraversal in forward direction")
while(node is not None):
print(node.data)
last = node
node = node.next
def push(self, new_data,seatno,present):
print("push start")
new_node=Node(new_data,seatno,present)
new_node.next=self.head
new_node.prev=None
if self.head is not None:
self.head.prev = new_node
self.head = new_node
    def deleteNodeend(self, node):
        print("\nDelete last")
        if node is None:
            return
        if node.next is None:
            # single-node list: the head is also the tail
            self.head = None
            return
        while node.next is not None:
            node = node.next
        # unlink the last node from its predecessor
        node.prev.next = None
        node.prev = None
    def deleteNodeFront(self, node):
        print("\nDelete Front")
        self.head = node.next
        if self.head is not None:
            self.head.prev = None  # the new head has no predecessor
        node.next = None
llist = DoublyLinkedList()
seats=int(input("\nEnter number of seats"))
for i in range(1,seats+1):
llist.append(0,i,'')
llist.printList(llist.head)
llist.push(10,10,'new')
llist.push(11,11,'new')
llist.printList(llist.head)
llist.deleteNodeend(llist.head)
llist.printList(llist.head)
llist.deleteNodeFront(llist.head)
llist.printList(llist.head)
|
tanmay6414/Python
|
DSA_in_python/dequeue.py
|
dequeue.py
|
py
| 1,950 |
python
|
en
|
code
| 0 |
github-code
|
50
|
19031455400
|
import streamlit as st
from main import get_graph, get_download_graph
def graph():
    pattern = st.text_input("The word we want to find in the messages")
    time_from = st.text_input("start time (in year-month-day format)")
    time_to = st.text_input("end time (in year-month-day format)")
    max_thikness = st.text_input("how many groups to split the already filtered data into", value=10)
    border = st.text_input("data filtering threshold: if you specify 0.9 you get the top 10% "
                           "(i.e. the 10% largest values)", value=0.9)
    if pattern and time_from and time_to:
        st.graphviz_chart(get_graph(pattern=pattern, time_from=time_from, time_to=time_to,
                                    max_thikness=int(max_thikness), border=float(border)))
        st.download_button(label="Download graph", data=get_download_graph())
graph()
|
artem12345-png/CV
|
message_graph/server.py
|
server.py
|
py
| 1,252 |
python
|
ru
|
code
| 0 |
github-code
|
50
|
22617246797
|
import json

from .HTTPClient import HTTPClient
def autoFillFeatures(options=None):
features = options.get('features', []) if options else []
if options and 'question' in options and 'question_answer' not in features:
features.append('question_answer')
return features
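# Illustration of the auto-fill behaviour above (hypothetical option dicts):
# autoFillFeatures({'question': 'What is shown?'})            -> ['question_answer']
# autoFillFeatures({'features': ['x'], 'question': 'Why?'})   -> ['x', 'question_answer']
# autoFillFeatures()                                          -> []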
class SceneXClient(HTTPClient):
def __init__(self, headers=None):
baseUrl = 'https://us-central1-causal-diffusion.cloudfunctions.net'
defaultHeaders = {
'Content-Type': 'application/json',
}
        # dict.update() returns None, so merge in place and guard against headers=None
        if headers:
            defaultHeaders.update(headers)
        super().__init__(baseUrl=baseUrl, headers=defaultHeaders)
def from_array(self, input, options=None):
return {
'data': [
{
'image': i,
'features': autoFillFeatures(options),
**(options or {})
}
for i in input
]
}
def from_string(self, input, options=None):
return {
'data': [
{
'image': input,
'features': autoFillFeatures(options),
**(options or {})
}
]
}
def to_simplified_output(self, output):
        if not output.get('result') or not any(x.get('text') != '' for x in output['result']):
            raise Exception('Remote API Error, bad output: {}'.format(json.dumps(output)))
return {
'results': [
{
'output': r['answer'] if 'answer' in r and r['answer'] is not None else r['text'],
'i18n': r['i18n']
}
for r in output['result']
]
}
def describe(self, data, options = None):
raw_output = self.post('/describe', data)
simplified_output = self.to_simplified_output(raw_output)
if options and 'raw' in options:
simplified_output['raw'] = raw_output
return simplified_output
|
standardgalactic/jinaai-py
|
jinaai/clients/SceneXClient.py
|
SceneXClient.py
|
py
| 2,028 |
python
|
en
|
code
| null |
github-code
|
50
|
18553789292
|
from django.urls import path, include
from .views import (
telegram_index, viber_index, ChannelListView,
ChannelDetailView, ChannelFullDetailView, ChannelCreateView,
ChannelUpdateView, ChannelDeleteView, BotUpdateView,
BotCreateView, root_view, ajax_channels_update,
ajax_get_channels, channel_list_view,
ajax_webhook, ajax_get_moderators, ajax_unset_webhook,
keyboards_constructor_view, statistics_view
)
app_name = "bots-management"
urlpatterns = [
path("", root_view, name="root"),
path("ajax_get_channels/",
ajax_get_channels,
name='ajax_get_channels'),
path("ajax_get_moderators/",
ajax_get_moderators,
name='ajax_get_moderators'),
path("ajax_channels_update/",
ajax_channels_update,
name='ajax_channels_update'),
path("channels_new/",
channel_list_view,
name="channel-list-new"),
path("ajax_webhook/",
ajax_webhook,
name="ajax_webhook"),
path("ajax_unset_webhook/",
ajax_unset_webhook,
name="ajax_unset_webhook"),
path("channels/",
ChannelListView.as_view(),
name="channel-list"),
path("channel/<str:slug>/",
ChannelDetailView.as_view(),
name="channel-detail"),
path("channel_full/<str:slug>/",
ChannelFullDetailView.as_view(),
name="channel-full-detail"),
path("channel_create/",
ChannelCreateView.as_view(),
name="channel-create"),
path("channel_delete/<str:slug>/",
ChannelDeleteView.as_view(),
name="channel-delete"),
path("channel_update/<str:slug>/",
ChannelUpdateView.as_view(),
name="channel-update"),
path("channel/<str:slug>/bot_create/",
BotCreateView.as_view(),
name="bot-create"),
path("channel/<str:slug>/bot_update/<int:pk>/",
BotUpdateView.as_view(),
name="bot-update"),
path("keyboards_new",
keyboards_constructor_view,
name="keyboards-new"),
path("statistics_new",
statistics_view,
name="statistics-new"),
# actions with keyboards, related to certain channel
path("channel/<str:slug>/",
include("keyboards.urls", namespace="keyboards")),
# bots and subscribers' analytics
path("channel/<str:slug>/",
include("analytics.urls", namespace="analytics")),
# subscribers and messages
path("subscribers/",
include("subscribers.urls", namespace="subscribers")),
# bots` mailings
path("channel/<str:slug>/",
include("bots_mailings.urls", namespace="mailings")),
path("telegram_prod/<str:slug>/", telegram_index),
path("viber_prod/<str:slug>/", viber_index),
]
|
wykyee/old-bot
|
bots_management/urls.py
|
urls.py
|
py
| 2,745 |
python
|
en
|
code
| 0 |
github-code
|
50
|
10733655437
|
from MovieLens import MovieLens
from surprise import KNNBasic
import heapq
from collections import defaultdict
from operator import itemgetter
import socket
def simpleUserCFGive(id):
testSubject = str(id)
k = 10
# Load our data set and compute the user similarity matrix
ml = MovieLens()
data = ml.loadMovieLensLatestSmall()
trainSet = data.build_full_trainset()
sim_options = {'name': 'cosine',
'user_based': True
}
model = KNNBasic(sim_options=sim_options)
model.fit(trainSet)
simsMatrix = model.compute_similarities()
# Get top N similar users to our test subject
# (Alternate approach would be to select users up to some similarity threshold - try it!)
testUserInnerID = trainSet.to_inner_uid(testSubject)
similarityRow = simsMatrix[testUserInnerID]
similarUsers = []
for innerID, score in enumerate(similarityRow):
if (innerID != testUserInnerID):
similarUsers.append( (innerID, score) )
kNeighbors = heapq.nlargest(k, similarUsers, key=lambda t: t[1])
# Get the stuff they rated, and add up ratings for each item, weighted by user similarity
candidates = defaultdict(float)
for similarUser in kNeighbors:
innerID = similarUser[0]
userSimilarityScore = similarUser[1]
theirRatings = trainSet.ur[innerID]
for rating in theirRatings:
candidates[rating[0]] += (rating[1] / 5.0) * userSimilarityScore
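            # e.g. (hypothetical numbers) a neighbour with similarity 0.8 who
            # rated an item 4.0 stars adds 0.8 * (4.0 / 5.0) = 0.64 to that item's score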
# Build a dictionary of stuff the user has already seen
watched = {}
for itemID, rating in trainSet.ur[testUserInnerID]:
watched[itemID] = 1
# Get top-rated items from similar users:
s="\n"+str(id)
pos = 0
for itemID, ratingSum in sorted(candidates.items(), key=itemgetter(1), reverse=True):
if not itemID in watched:
movieID = trainSet.to_raw_iid(itemID)
s+=","+ml.getMovieName(int(movieID))
pos += 1
if (pos > 10):
break
file = open("E:\\Neeraj\\SimpleUserCFBase.txt", "r")
alld=file.readlines()
file.close()
file1 = open("E:\\Neeraj\\SimpleUserCFBase.txt", "w")
for r1 in alld:
print(r1)
u=r1.find(",")
if(r1[0:u]==str(id)):
pass
else:
file1.write(r1)
file1.write(s)
file1.close()
print ("\nDone")
def Main():
host = "127.0.0.3"
port = 5000
mySocket = socket.socket()
mySocket.bind((host,port))
while(True):
mySocket.listen(10)
conn, addr = mySocket.accept()
print ("Connection from: " + str(addr))
data = conn.recv(1024).decode()
print ("from connected user: " + str(data))
simpleUserCFGive(int(data))
conn.close()
if __name__ == '__main__':
Main()
#simpleUserCFGive(2)
|
neerajrp1999/Movie-App-Including-recommender-system
|
SimpleUserCF/SimpleUserCF/SimpleUserCF.py
|
SimpleUserCF.py
|
py
| 3,023 |
python
|
en
|
code
| 0 |
github-code
|
50
|
16558263898
|
import gzip
import itertools
import os
import time
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from imicrobe.uproc_results.uproc_models import SampleToUproc, Uproc
def main():
# connect to database on server
# e.g. mysql+pymysql://load:<password>@localhost/load
db_uri = os.environ.get('IMICROBE_DB_URI')
imicrobe_engine = sa.create_engine(db_uri, echo=False)
# reflect tables
meta = sa.MetaData()
meta.reflect(bind=imicrobe_engine)
Session = sessionmaker(bind=imicrobe_engine)
session = Session()
drop_table(SampleToUproc, engine=imicrobe_engine)
drop_table(Uproc, engine=imicrobe_engine)
Uproc.__table__.create(imicrobe_engine)
load_pfam_table(session=session, engine=imicrobe_engine)
# how many rows in the Uproc table?
uproc_row_count = session.query(Uproc).count()
print('{} rows in the uproc table after inserting data from pfamA.txt.gz'.format(uproc_row_count))
load_dead_pfam(session=session, engine=imicrobe_engine)
# how many rows in the Uproc table?
uproc_row_count = session.query(Uproc).count()
print('{} rows in the uproc table after inserting data from dead_family.txt.gz'.format(uproc_row_count))
def drop_table(table, engine):
# delete the relationship table first
try:
table.__table__.drop(engine)
print('dropped table "{}"'.format(table.__tablename__))
except Exception as e:
print(e)
def take(n, iterable):
"Return first n items of the iterable as a list"
return list(itertools.islice(iterable, n))
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
def load_pfam_table(session, engine):
debug = False
line_group_length = 2000
pfamA_fp = 'data/pfamA.txt.gz'
# had problems on myo with U+009D in PF01298 description
# not a problem with imicrobe-vm on my laptop
# this is the error:
# UnicodeEncodeError: 'latin-1' codec can't encode characters in position 1089-1090: ordinal not in range(256)
# why is 'latin-1' codec being used?
# specifying encoding='latin-1' and errors='replace' solves the problem on myo
with gzip.open(pfamA_fp, 'rt', encoding='latin-1', errors='replace') as pfamA_file:
for line_group in grouper(pfamA_file.readlines(), line_group_length, fillvalue=None):
line_counter = 0
t0 = time.time()
for line in (line_ for line_ in line_group if line_ is not None):
line_counter += 1
pfam_acc, pfam_identifier, pfam_aliases, pfam_name, _, _, _, _, description, *the_rest = line.strip().split('\t')
if debug:
print('pfam accession : {}'.format(pfam_acc))
print('pfam identifier : {}'.format(pfam_identifier))
print('pfam aliases : {}'.format(pfam_aliases))
print('pfam name : {}'.format(pfam_name))
print('description : {}'.format(description))
if session.query(Uproc).filter(Uproc.accession==pfam_acc).one_or_none():
pass
#print('{} is already in the database'.format(pfam_acc))
else:
# insert
session.add(
Uproc(
accession=pfam_acc,
identifier=pfam_identifier,
name=pfam_name,
description=description))
session.commit()
print(
'committed {} rows in {:5.1f}s'.format(
line_counter,
time.time()-t0))
print('table "{}" has {} rows'.format(Uproc.__tablename__, session.query(Uproc).count()))
def load_dead_pfam(session, engine):
# there are some strange rows in this file
debug = False
dead_pfam_fp = 'data/dead_family.txt.gz'
with gzip.open(dead_pfam_fp, 'rt') as dead_pfam_file:
for line in dead_pfam_file:
dead_pfam_accession, pfam_identifier, pfam_cause_of_death, *_ = line.strip().split('\t')
if debug:
print('************* line:\n\t{}'.format(line))
print(dead_pfam_accession)
print(pfam_identifier)
print(pfam_cause_of_death)
print('\n')
if session.query(Uproc).filter(Uproc.accession == dead_pfam_accession).one_or_none():
print('dead Pfam accession "{}" is already in table uproc'.format(dead_pfam_accession))
else:
# insert
session.add(
Uproc(
accession=dead_pfam_accession,
identifier=pfam_identifier,
name='dead',
description=pfam_cause_of_death))
session.commit()
print('table "{}" has {} rows'.format(Uproc.__tablename__, session.query(Uproc).count()))
"""
pfam_url = 'http://pfam.xfam.org'
pfam_family_url = urllib.parse.urljoin(pfam_url, '/family')
for sample_uproc_id_i in session.query(models.Sample_uproc.uproc_id).order_by(models.Sample_uproc.uproc_id).distinct().limit(10):
print(sample_uproc_id_i)
# is the PFAM annotation already in the database?
if session.query(Uproc).filter(Uproc.pfam_annot_id == sample_uproc_id_i.uproc_id).one_or_none() is None:
response = requests.get(
url=pfam_family_url,
params={'acc': sample_uproc_id_i, 'output': 'xml'})
response_root = ET.fromstring(response.text)
description = response_root[0][1].text
pfam_annot_i = Uproc(pfam_acc=sample_uproc_id_i, annot=description)
session.add(pfam_annot_i)
else:
print('{} is already in Uproc table'.format(sample_uproc_id_i))
session.commit()
session.close()
"""
if __name__ == '__main__':
main()
|
hurwitzlab/imicrobe-data-loaders
|
imicrobe/load/uproc_results/load_pfam_table.py
|
load_pfam_table.py
|
py
| 6,158 |
python
|
en
|
code
| 0 |
github-code
|
50
|
29011691648
|
"""An interface for interacting with the num2vid config.json."""
import json
from .errors import ConfigPathError, ConfigReadError
class Config:
"""An interface for interacting with the num2vid config.json.
:attr _path: path to the current instance's config json.
:type _path: str
:attr _config: python dictionary mirror of the json.
    :type _config: dict
"""
def __init__(self, path: str):
"""Initialize with config path.
:param path: path to the config json.
"""
self._path = path
self._config = self.load()
def get(self, key: str, default: str = None) -> str:
"""Get value by key from current instance's config.
:param key: key to retrieve.
:param default: default value to return if key not found
"""
return self._config.get(key, default)
@property
def path(self) -> str:
"""Retrieve path to current instance's config json."""
return self._path
def update(self, new_config_dict: dict) -> dict:
"""Update the instance dict.
:param new_config_dict: dictionary of preferences to update.
"""
self._config.update(new_config_dict)
return self._config
def clear(self):
"""Reset the prefs json to an empty dict and save."""
self.save({})
    def save(self, config_dict: dict) -> dict:
        """Apply given config_dict to disk and update instance dict.
:param config_dict: the dict to write to disk.
"""
try:
with open(self._path, "w+") as config_fo:
config_fo.write(json.dumps(config_dict))
except (FileNotFoundError, PermissionError, json.decoder.JSONDecodeError) as err:
if isinstance(err, json.decoder.JSONDecodeError):
raise ConfigReadError(err)
raise ConfigPathError(err)
self._config = config_dict
return self._config
def load(self) -> dict:
"""Retrieve the contents of config.json and set to the current instance."""
config_dict = {}
try:
with open(self._path, "r") as config_fo:
config_dict = json.load(config_fo)
except (FileNotFoundError, PermissionError, json.decoder.JSONDecodeError) as err:
if isinstance(err, json.decoder.JSONDecodeError):
raise ConfigReadError(err)
raise ConfigPathError(err)
self._config = config_dict
return self._config
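# A minimal usage sketch ("/tmp/num2vid_config.json" is a hypothetical path):
# config = Config("/tmp/num2vid_config.json")                 # loads the json from disk
# config.save(config.update({"output_dir": "/tmp/renders"}))  # merge and persist
# config.get("output_dir")                                    # -> "/tmp/renders"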
|
jacobmartinez3d/num2vid
|
num2vid/config.py
|
config.py
|
py
| 2,504 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26255326658
|
#!/usr/bin/env python
from twisted.internet import reactor
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.ssdp import SSDPServer
from coherence.upnp.core.msearch import MSearch
from coherence.upnp.core.device import Device, RootDevice
from coherence.extern import louie
class DevicesListener(object):
def __init__(self):
self.ssdp = SSDPServer()
self.msearch = MSearch(self.ssdp, test=False)
self.devices = []
louie.connect(self.ssdp_detected, 'Coherence.UPnP.SSDP.new_device', louie.Any)
louie.connect(self.ssdp_deleted, 'Coherence.UPnP.SSDP.removed_device', louie.Any)
louie.connect(self.device_found, 'Coherence.UPnP.RootDevice.detection_completed', louie.Any)
self.msearch.double_discover()
    def _get_device_by_id(self, id):
        found = None
        # Normalize away any 'uuid:' prefix on both sides before comparing
        # (comparing a 5-char slice against the 4-char 'uid:' was always
        # unequal, so the prefix was stripped unconditionally).
        if id.startswith('uuid:'):
            id = id[5:]
        for device in self.devices:
            this_id = device.get_id()
            if this_id.startswith('uuid:'):
                this_id = this_id[5:]
            if this_id == id:
                found = device
                break
        return found
def _get_device_by_usn(self, usn):
found = None
for device in self.devices:
if device.get_usn() == usn:
found = device
break
return found
def ssdp_detected(self, device_type, infos, *args, **kwargs):
print("Found ssdp %s"%(infos,))
if infos['ST'] == 'upnp:rootdevice':
root = RootDevice(infos)
else:
root_id = infos['USN'][:-len(infos['ST']) - 2]
root = self._get_device_by_id(root_id)
device = Device(infos, root) # kicks off loading of the device info
# which will call device_found callback
def ssdp_deleted(self, device_type, infos, *args, **kwargs):
        device = self._get_device_by_usn(infos['USN'])
if device:
louie.send('Coherence.UPnP.Device.removed', None, usn=infos['USN'])
self.devices.remove(device)
device.remove()
if infos['ST'] == 'upnp:rootdevice':
louie.send('Coherence.UPnP.RootDevice.removed', None, usn=infos['USN'])
def device_found(self, device):
print("Found device %s"%(device,))
self.devices.append(device)
for service in device.get_services():
print(" %s @ %s"%(service.get_type(), service.get_control_url()))
if 'ContentDirectory' in service.get_type():
for actionname,action in service.get_actions().items():
if action.get_name() == 'Browse':
d = action.call(
ObjectID='0',
BrowseFlag='BrowseDirectChildren',
Filter='*', SortCriteria='',
StartingIndex='0',
RequestedCount='0'
)
d.addCallback(self.browse_callback)
def browse_callback(self, result):
results = DIDLLite.DIDLElement.fromString(result['Result']).getItems()
print([result.title for result in results])
def browse_error(self, error):
print(error.getTraceback())
devices = DevicesListener()
print("Beginning")
reactor.run()
|
kpister/prompt-linter
|
data/scraping/repos/hufman~coherence_experiments/ssdp.py
|
ssdp.py
|
py
| 2,768 |
python
|
en
|
code
| 0 |
github-code
|
50
|
10539216817
|
#!/usr/bin/python
# coding: UTF-8
# original code URL https://github.com/xkumiyu/chainer-GAN-CelebA
# revised by Nakkkkk(https://github.com/Nakkkkk)
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
def add_noise(h, sigma=0.2):
xp = cuda.get_array_module(h.data)
if chainer.config.train:
return h + sigma * xp.random.randn(*h.shape)
else:
return h
# Minibatch Discrimination to prevent mode collapse (http://musyoku.github.io/2016/12/23/Improved-Techniques-for-Training-GANs/)
class Minibatch_Discrimination(chainer.Chain):
"""
Minibatch Discrimination Layer
Parameters
---------------------
B: int
number of rows of M
C: int
number of columns of M
wscale: float
std of normal initializer
"""
def __init__(self, B, C, wscale):
super(Minibatch_Discrimination, self).__init__()
self.b = B
self.c = C
with self.init_scope():
# initialozer to W
w = chainer.initializers.Normal(wscale)
# register Parameters
self.t = L.Linear(in_size=None,
out_size=B*C,
initialW=w,
nobias=True) # bias is required ?
def __call__(self, x):
"""
        Calculate Minibatch Discrimination using broadcasting.
Parameters
---------------
x: Variable
input vector shape is (N, num_units)
"""
batch_size = x.shape[0]
xp = x.xp
activation = self.t(x)
m = F.reshape(activation, (-1, self.b, self.c))
m = F.expand_dims(m, 3)
m_T = F.transpose(m, (3, 1, 2, 0))
m, m_T = F.broadcast(m, m_T)
l1_norm = F.sum(F.absolute(m-m_T), axis=2)
# eraser to erase l1 norm with themselves
eraser = F.expand_dims(xp.eye(batch_size, dtype="f"), 1)
eraser = F.broadcast_to(eraser, (batch_size, self.b, batch_size))
o_X = F.sum(F.exp(-(l1_norm + 1e6 * eraser)), axis=2)
# concatunate along channels or units
return F.concat((x, o_X), axis=1)
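# Shape sketch for the layer above (hypothetical sizes): with batch size N=16,
# B=32, C=8 and num_units=128, t maps (16, 128) -> (16, 256), which is reshaped
# to M of shape (16, 32, 8); the pairwise L1 distances yield o_X of shape
# (16, 32), and the returned concatenation has shape (16, 128 + 32).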
class Discriminator(chainer.Chain):
def __init__(self, wscale=0.02, unrolling_steps=5):
self.b, self.c = 32, 8
w = chainer.initializers.Normal(wscale)
self.unrolling_steps = unrolling_steps
super(Discriminator, self).__init__()
with self.init_scope():
self.c0_0 = L.Convolution2D(3, 64, 3, stride=2, pad=1, initialW=w)
self.c0_1 = L.Convolution2D(64, 128, 4, stride=2, pad=1, initialW=w)
self.c1_0 = L.Convolution2D(128, 128, 3, stride=1, pad=1, initialW=w)
self.c1_1 = L.Convolution2D(128, 256, 4, stride=2, pad=1, initialW=w)
self.c2_0 = L.Convolution2D(256, 256, 3, stride=1, pad=1, initialW=w)
self.c2_1 = L.Convolution2D(256, 512, 4, stride=2, pad=1, initialW=w)
#self.c3_0 = L.Convolution2D(512, 512, 3, stride=1, pad=1, initialW=w)
self.l4_0 = L.Linear(4 * 4 * 512, 128, initialW=w)
self.md1 = Minibatch_Discrimination(
B=self.b, C=self.c, wscale=wscale)
#self.l4 = L.Linear(4 * 4 * 512, 1, initialW=w)
self.l4 = L.Linear(None, 12, initialW=w)
self.bn0_1 = L.BatchNormalization(128, use_gamma=False)
self.bn1_0 = L.BatchNormalization(128, use_gamma=False)
self.bn1_1 = L.BatchNormalization(256, use_gamma=False)
self.bn2_0 = L.BatchNormalization(256, use_gamma=False)
self.bn2_1 = L.BatchNormalization(512, use_gamma=False)
self.bn3_0 = L.BatchNormalization(512, use_gamma=False)
def cache_discriminator_weights(self):
self.cached_weights = {}
for name, param in self.namedparams():
with cuda.get_device(param.data):
xp = cuda.get_array_module(param.data)
self.cached_weights[name] = xp.copy(param.data)
def restore_discriminator_weights(self):
for name, param in self.namedparams():
with cuda.get_device(param.data):
if name not in self.cached_weights:
raise Exception()
param.data = self.cached_weights[name]
def __call__(self, x):
h = add_noise(x)
h = F.leaky_relu(add_noise(self.c0_0(h)))
h = F.leaky_relu(add_noise(self.bn0_1(self.c0_1(h))))
h = F.leaky_relu(add_noise(self.bn1_0(self.c1_0(h))))
h = F.leaky_relu(add_noise(self.bn1_1(self.c1_1(h))))
h = F.leaky_relu(add_noise(self.bn2_0(self.c2_0(h))))
h = F.leaky_relu(add_noise(self.bn2_1(self.c2_1(h))))
#h = F.leaky_relu(add_noise(self.bn3_0(self.c3_0(h))))
h = self.l4_0(h)
h = self.md1(h)
h = self.l4(h)
return h
class Encoder(chainer.Chain):
def __init__(self, wscale=0.02):
w = chainer.initializers.Normal(wscale)
super(Encoder, self).__init__()
with self.init_scope():
self.c0_0 = L.Convolution2D(3, 64, 3, stride=2, pad=1, initialW=w)
self.c0_1 = L.Convolution2D(64, 128, 4, stride=2, pad=1, initialW=w)
self.c1_0 = L.Convolution2D(128, 128, 3, stride=1, pad=1, initialW=w)
self.c1_1 = L.Convolution2D(128, 256, 4, stride=2, pad=1, initialW=w)
self.c2_0 = L.Convolution2D(256, 256, 3, stride=1, pad=1, initialW=w)
self.c2_1 = L.Convolution2D(256, 512, 4, stride=2, pad=1, initialW=w)
self.c3_0 = L.Convolution2D(512, 512, 3, stride=1, pad=1, initialW=w)
self.l4 = L.Linear(4 * 4 * 512, 100, initialW=w)
self.bn0_1 = L.BatchNormalization(128, use_gamma=False)
self.bn1_0 = L.BatchNormalization(128, use_gamma=False)
self.bn1_1 = L.BatchNormalization(256, use_gamma=False)
self.bn2_0 = L.BatchNormalization(256, use_gamma=False)
self.bn2_1 = L.BatchNormalization(512, use_gamma=False)
self.bn3_0 = L.BatchNormalization(512, use_gamma=False)
def __call__(self, x):
h = F.leaky_relu(self.c0_0(x))
h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
h = F.leaky_relu(self.bn3_0(self.c3_0(h)))
h = self.l4(h)
return h
class EncoderGenerator(chainer.Chain):
def __init__(self, wscale=0.02):
super(EncoderGenerator, self).__init__()
self.n_hidden = 100
with self.init_scope():
# Encoder
w = chainer.initializers.Normal(wscale)
self.c0_0 = L.Convolution2D(3, 64, 3, stride=2, pad=1, initialW=w)
self.c0_1 = L.Convolution2D(64, 128, 4, stride=2, pad=1, initialW=w)
self.c1_0 = L.Convolution2D(128, 128, 3, stride=1, pad=1, initialW=w)
self.c1_1 = L.Convolution2D(128, 256, 4, stride=2, pad=1, initialW=w)
self.c2_0 = L.Convolution2D(256, 256, 3, stride=1, pad=1, initialW=w)
self.c2_1 = L.Convolution2D(256, 512, 4, stride=2, pad=1, initialW=w)
self.c3_0 = L.Convolution2D(512, 512, 3, stride=1, pad=1, initialW=w)
self.l4 = L.Linear(4 * 4 * 512, 100, initialW=w)
self.bn0_1 = L.BatchNormalization(128, use_gamma=False)
self.bn1_0 = L.BatchNormalization(128, use_gamma=False)
self.bn1_1 = L.BatchNormalization(256, use_gamma=False)
self.bn2_0 = L.BatchNormalization(256, use_gamma=False)
self.bn2_1 = L.BatchNormalization(512, use_gamma=False)
self.bn3_0 = L.BatchNormalization(512, use_gamma=False)
# Generator
self.l0 = L.Linear(100, 4 * 4 * 512, initialW=w)
self.dc1 = L.Deconvolution2D(512, 256, 4, stride=2, pad=1, initialW=w)
self.dc2 = L.Deconvolution2D(256, 128, 4, stride=2, pad=1, initialW=w)
self.dc3 = L.Deconvolution2D(128, 64, 4, stride=2, pad=1, initialW=w)
self.dc4 = L.Deconvolution2D(64, 3, 4, stride=2, pad=1, initialW=w)
self.bn0 = L.BatchNormalization(4 * 4 * 512)
self.bn1 = L.BatchNormalization(256)
self.bn2 = L.BatchNormalization(128)
self.bn3 = L.BatchNormalization(64)
def make_hidden(self, batchsize):
return numpy.random.uniform(-1, 1, (batchsize, self.n_hidden, 1, 1))\
.astype(numpy.float32)
def __call__(self, x):
# Encoder
h = F.leaky_relu(self.c0_0(x))
h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
h = F.leaky_relu(self.bn3_0(self.c3_0(h)))
h = self.l4(h)
# Generator
h = F.reshape(F.leaky_relu(self.bn0(self.l0(h))), (len(h), 512, 4, 4))
h = F.leaky_relu(self.bn1(self.dc1(h)))
h = F.leaky_relu(self.bn2(self.dc2(h)))
h = F.leaky_relu(self.bn3(self.dc3(h)))
x = F.sigmoid(self.dc4(h))
return x
|
Nakkkkk/chainer-GAN-CelebA-anime-annotated
|
net.py
|
net.py
|
py
| 9,311 |
python
|
en
|
code
| 0 |
github-code
|
50
|
21021275299
|
from ..crawler import TableCrawler
from ..entities import Party
from ..utils import parse_vote_count
URL = 'https://www.cec.gov.tw/pc/zh_TW/L4/n00000000000000000.html'
TOTAL_SEATS = 34
def calculate_round_1(vote_counts):
total_vote_count = sum(vote_counts.values())
result = sorted(
((name, TOTAL_SEATS * vote_count / total_vote_count)
for name, vote_count in vote_counts.items()),
key=lambda r: r[1] % 1, reverse=True,
)
return result
def calculate_round_2(round_1_result):
remaining = TOTAL_SEATS - sum(int(row[1]) for row in round_1_result)
result = [
Party(name, int(count) + 1)
for name, count in round_1_result[:remaining]
] + [
Party(name, int(count))
for name, count in round_1_result[remaining:]
]
return result
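# A worked example of the largest-remainder allocation above (hypothetical
# vote counts): with TOTAL_SEATS = 34 and votes A=500, B=300, C=200, the
# ideal shares are 17.0, 10.2 and 6.8. Round 1 sorts by fractional part
# (C: 0.8, B: 0.2, A: 0.0); the integer parts use 33 seats, so round 2 gives
# the single remaining seat to C:
# calculate_round_2(calculate_round_1({'A': 500, 'B': 300, 'C': 200}))
# -> [Party('C', 7), Party('B', 10), Party('A', 17)]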
def crawl():
crawler = TableCrawler(URL)
info_list = crawler.crawl()
vote_counts = {
info['政黨']: parse_vote_count(info)
for info in info_list if float(info['得票率%']) >= 5.0
}
round_1_result = calculate_round_1(vote_counts)
final_result = calculate_round_2(round_1_result)
return final_result
|
uranusjr/electionpeeker
|
electionpeeker/sources/national.py
|
national.py
|
py
| 1,167 |
python
|
en
|
code
| 1 |
github-code
|
50
|
18525853716
|
from util import *
from bs4 import BeautifulSoup
import json
years = [
['2017', 'http://www.fortunechina.com/fortune500/c/2017-07/20/content_286785.htm'],
['2016', 'http://www.fortunechina.com/fortune500/c/2016-07/20/content_266955.htm'],
['2015', 'http://www.fortunechina.com/fortune500/c/2015-07/22/content_244435.htm'],
['2014', 'http://www.fortunechina.com/fortune500/c/2014-07/07/content_212535.htm'],
]
def crawl_companys():
f = open('./files/companys', 'w')
for year_item in years:
req = build_request(year_item[-1])
res_text = req.text.encode("iso-8859-1").decode('utf-8')
table = BeautifulSoup(res_text, 'lxml').find(
'table', {'id': 'yytable'}).find_all('tr')
for tr in table[1:]:
td_list = tr.find_all('td')
line = [year_item[0]]
for td in td_list:
line.append(td.get_text())
url = tr.find('a').get('href')
line.append(url)
f.write(json.dumps(line, ensure_ascii=False)+'\n')
f.close()
def crawl_2013_companys():
page = 1
f = open('./files/companys', 'a')
while page < 6:
if page != 1:
url = 'http://www.fortunechina.com/fortune500/c/2013-07/08/content_164375_{}.htm'.format(
page)
else:
url = 'http://www.fortunechina.com/fortune500/c/2013-07/08/content_164375.htm'
req = build_request(url)
res_text = req.text.encode("iso-8859-1").decode('utf-8')
table = BeautifulSoup(res_text, 'lxml').find(
'table', {'class': 'rankingtable'}).find_all('tr')
for tr in table[1:]:
td_list = tr.find_all('td')
line = ['2013']
for td in td_list:
line.append(td.get_text())
url = tr.find('a').get('href')
line.append(url)
f.write(json.dumps(line, ensure_ascii=False)+'\n')
page+=1
f.close()
def get_company_info(url):
req=build_request(url)
thisyeardata=BeautifulSoup(req.text,'lxml').find('div',{'class':'thisyeardata'}).find_all('tr')
result={}
for tr in thisyeardata:
if '<table' in str(tr):
continue
if '国家' in str(tr):
value=tr.find('td').get_text().replace('国家','').replace(':','').replace(':','').replace('\r','').replace('\n','').replace(' ','')
result['国家']=value
if '员工数' in str(tr):
value=tr.find_all('td')[-1].get_text().replace('员工数','').replace(':','').replace(':','').replace('\r','').replace('\n','').replace(' ','')
result['员工数']=value
if '营业收入' in str(tr):
value=tr.find_all('td')[1].get_text()
result['营业收入']=value
value=tr.find_all('td')[2].get_text()
result['营业收入增减']=value
if '利润' in str(tr) and '利润占比' not in str(tr):
value=tr.find_all('td')[1].get_text()
result['利润']=value
value=tr.find_all('td')[2].get_text()
result['利润增减']=value
if '资产' in str(tr) and '资产收益' not in str(tr) and '资产控股' not in str(tr):
value=tr.find_all('td')[1].get_text()
result['资产']=value
value=tr.find_all('td')[2].get_text()
result['资产增减']=value
if '股东权益' in str(tr):
value=tr.find_all('td')[1].get_text()
result['股东权益']=value
value=tr.find_all('td')[2].get_text()
result['股东权益增减']=value
if '净利率' in str(tr):
value=tr.find_all('td')[1].get_text()
result['净利率']=value
if '资产收益率' in str(tr):
value=tr.find_all('td')[1].get_text()
result['资产收益率']=value
return result
def crawl_info():
for line in open('./files/companys','r'):
company=json.loads(line)
try:
info=get_company_info(company[-1])
except:
f=open('./files/companys_fail','a')
f.write(json.dumps(company, ensure_ascii=False)+'\n')
f.close()
continue
info['base']=company
f=open('./files/companys_info','a')
f.write(json.dumps(info, ensure_ascii=False)+'\n')
f.close()
print(company)
def load_companys():
headers=['name','国家']
year_list=['2013','2014','2015','2016','2017']
year_list.reverse()
for info_key in ['排名','员工数','营业收入','营业收入增减','利润','利润增减','净利率','资产','资产增减','资产收益率','股东权益','股东权益增减']:
for year in year_list:
headers.append(year+' '+info_key)
yield headers
result={}
for line in open('./files/companys_info','r'):
company=json.loads(line)
key=company['base'][3]
key=sub_str(key,append=[' '])
year=company['base'][0]
if key in result:
result[key][year]=company
else:
result[key]={}
result[key][year]=company
for company_key in result:
line=['','']
for year in year_list:
if year not in result[company_key]:
line.append('')
continue
line[0]=result[company_key][year]['base'][3]
line[1]=result[company_key][year]['base'][-2]
            # rank for that year
line.append(result[company_key][year]['base'][1])
for info_key in ['员工数','营业收入','营业收入增减','利润','利润增减','净利率','资产','资产增减','资产收益率','股东权益','股东权益增减']:
for year in year_list:
if year not in result[company_key]:
line.append('')
continue
line.append(sub_str(result[company_key][year][info_key]))
yield line
#crawl_info()
write_to_excel(load_companys(),'世界500强.xlsx')
|
19js/Nyspider
|
www.fortunechina.com/fortune500.py
|
fortune500.py
|
py
| 6,154 |
python
|
en
|
code
| 16 |
github-code
|
50
|
23814910118
|
import csv
from datetime import datetime
DEGREE_SYBMOL = u"\N{DEGREE SIGN}C"
def format_temperature(temp):
"""Takes a temperature and returns it in string format with the degrees
and celcius symbols.
Args:
temp: A string representing a temperature.
Returns:
A string contain the temperature and "degrees celcius."
"""
return f"{temp}{DEGREE_SYBMOL}"
def convert_date(iso_string):  # DONE!
    """Converts an ISO formatted date into a human readable format.
Args:
iso_string: An ISO date string..
Returns:
A date formatted like: Weekday Date Month Year e.g. Tuesday 06 July 2021
"""
x = datetime.fromisoformat(iso_string)
# print(x.strftime("%A %d %B %Y"))
return x.strftime("%A %d %B %Y")
"""
%A Weekday, full version Wednesday
%d Day of month 31
%B Month name, full version December
%Y Year, full version 2018
"""
def convert_f_to_c(temp_in_farenheit):  # DONE
    """Converts a temperature from Fahrenheit to Celsius.
    Args:
        temp_in_farenheit: float representing a temperature.
    Returns:
        A float representing a temperature in degrees Celsius, rounded to 1dp.
    """
temp_in_c_float = ((float(temp_in_farenheit) - 32) * (5/9))
rounded_temp = round(temp_in_c_float,1)
return rounded_temp
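# e.g. convert_f_to_c(68) -> 20.0, since (68 - 32) * 5/9 = 20.0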
def calculate_mean(weather_data):
"""Calculates the mean value from a list of numbers.
Args:
weather_data: a list of numbers.
Returns:
A float representing the mean value.
"""
#def calculate_mean(a, b):
#total = a + b
#mean = total / 2
#return mean
    #print(calculate_mean(3, 4))
total = 0
for list_item in weather_data:
total += float(list_item)
mean_value = total / len(weather_data)
return mean_value
#print(calculate_mean([51.0, 58.2, 59.9, 52.4, 52.1, 48.4, 47.8, 53.43]))  # uncomment to check what is printed while working in weather.py
def load_data_from_csv(csv_file): # DONE
"""Reads a csv file and stores the data in a list.
Args:
csv_file: a string representing the file path to a csv file.
Returns:
A list of lists, where each sublist is a (non-empty) line in the csv file.
"""
weather_data = []
with open(csv_file) as csv_file: # you can name csv_file anything
reader = csv.reader(csv_file)
for line in reader:
if line != []:
weather_data.append(line)
    weather_data_integer = weather_data[1:]  # skip the header row (string column headings) found in the example csv files
for daily_data_format in weather_data_integer:
daily_data_format[1] = int(daily_data_format[1]) # daily_data_format refers to the presentation of each line: (datetime_str, min_int, max_int)
daily_data_format[2] = int(daily_data_format[2])
return weather_data_integer
def find_min(weather_data): #[34,25, 18, 57, 69]
"""Calculates the minimum value in a list of numbers.
Args:
weather_data: A list of numbers.
Returns:
        The minimum value and its position in the list.
"""
if weather_data == []:
return ()
else:
min_value = weather_data[0]
index = 0
min_index = 0
for num in weather_data:
if float(num) <= float(min_value):
min_value = float(num)
min_index = index
index += 1
return (min_value, min_index)
def find_max(weather_data):
"""Calculates the maximum value in a list of numbers.
Args:
weather_data: A list of numbers.
Returns:
        The maximum value and its position in the list.
"""
if weather_data == []:
return ()
else:
max_value = weather_data[0]
index = 0
max_index = 0
for num in weather_data:
if float(num) >= float(max_value):
max_value = float(num)
max_index = index
index += 1
return (max_value, max_index)
def generate_summary(weather_data):
"""Outputs a summary for the given weather data.
Args:
weather_data: A list of lists, where each sublist represents a day of weather data.
Returns:
A string containing the summary information.
My Notes:
The lowest temperature will be 9.4°C, and will occur on Friday 02 July 2021.
The highest temperature will be 20.0°C, and will occur on Saturday 03 July 2021.
The average low this week is 12.2°C.
The average high this week is 17.8°C.
"""
list_min = []
for list_all_mins in weather_data:
list_min.append(list_all_mins[1])
list_max = []
for list_all_max in weather_data:
list_max.append(list_all_max[2])
low_average = (calculate_mean(list_min))
high_average = (calculate_mean(list_max))
min_value, min_index = find_min(list_min)
max_value, max_index = find_max(list_max)
result = ""
no_of_rows = len(weather_data)
result = result + str(no_of_rows) + " Day Overview\n"
result = result + " The lowest temperature will be "
result = result + f"{format_temperature(convert_f_to_c(min_value))}"
result = result + ", and will occur on "
result = result + f"{convert_date(weather_data[min_index][0])}.\n"
result = result + " The highest temperature will be "
result = result + f"{format_temperature(convert_f_to_c(max_value))}"
result = result + ", and will occur on "
result = result + f"{convert_date(weather_data[max_index][0])}.\n"
result = result + " The average low this week is "
result = result + f"{format_temperature(convert_f_to_c(low_average))}.\n"
result = result + " The average high this week is "
result = result + f"{format_temperature(convert_f_to_c(high_average))}.\n"
return result
#Note: the below is added to see what information is printed when I run the weather.py.
# print(generate_summary([
# ["2021-07-02T07:00:00+08:00", 49, 67],
# ["2021-07-03T07:00:00+08:00", 57, 68],
# ["2021-07-04T07:00:00+08:00", 56, 62],
# ["2021-07-05T07:00:00+08:00", 55, 61],
# ["2021-07-06T07:00:00+08:00", 53, 62]
# ]))
def generate_daily_summary(weather_data):
"""Outputs a daily summary for the given weather data.
Args:
weather_data: A list of lists, where each sublist represents a day of weather data.
Returns:
A string containing the summary information.
My Notes:
row refers to each input(unformatted) line; sections of the each row e.g.
date,min,max
2021-07-02T07:00:00+08:00,49,67
2021-07-03T07:00:00+08:00,57,68
2021-07-04T07:00:00+08:00,56,62
2021-07-05T07:00:00+08:00,55,61
2021-07-06T07:00:00+08:00,53,62
[0 ,1,2] (sections of the list seperated by commas)
"""
result = "" # this represents output to be produced in string format
for row in weather_data:
result = result + "---- "
result = result + f"{convert_date(row[0])}"
result = result + " ----\n"
result = result + " Minimum Temperature: "
result = result + f"{format_temperature(convert_f_to_c(row[1]))}" + "\n"
result = result + " Maximum Temperature: "
result = result + f"{format_temperature(convert_f_to_c(row[2]))}" + "\n"
result = result + "\n"
return result
|
SheCodesAus/she-codes-python-weather-project-Rosie-Gul-codes
|
weather.py
|
weather.py
|
py
| 7,592 |
python
|
en
|
code
| 0 |
github-code
|
50
|
31158236285
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 23:39:51 2012
@author: Maxim
"""
def getRandPrefix(fileExt = "", addSymbol = ""):
from random import randrange
from time import gmtime, strftime
Time = int(strftime("%H%M%S", gmtime()))
NamePrefix = str(Time+randrange(0,1e6,1)) + addSymbol
if fileExt != "":
NamePrefix = NamePrefix + "." + fileExt
return(NamePrefix)
h = getRandPrefix()
print(h)
|
mishin/maxim-codes
|
getRandPrefix.py
|
getRandPrefix.py
|
py
| 470 |
python
|
en
|
code
| 0 |
github-code
|
50
|
43247112469
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import datetime
import math
import sys
sys.path.append('/lib/python2.7/site-packages')
import random
import numpy as np
import tensorflow as tf
def print_debug(msg):
if False:
print(msg)
WIDTH = 400
HEIGHT = 400
SELECTION_SIZE = 2 # 4
POPULATION_SIZE = 10 # 10
STEP = 200 # 200
GENERATION = 10000 # 1000
NUM_FOOD = 100 # 50
NUM_POISON = 0 # 0
MUTATION_BIAS = 1.0
SENSOR_LENGTH_WALL = 100
SENSOR_LENGTH_FOOD = 100
SENSOR_LENGTH_POISON = 100
AGENT_RADIUS = 20
AGENT_STEP_THETA = math.pi / 2
FOOD_RADIUS = 2
POISON_RADIUS = 2
INPUT_BIAS = 0.5
class Genome(object):
    NUM_FEATURE = 6  # [wall dist, wall angle, food dist, food angle, enemy dist, enemy angle]; only one of each can be sensed at a time
    NUM_HIDDEN_NODE = [2]  # number of nodes in each hidden layer
    NUM_HIDDEN = len(NUM_HIDDEN_NODE)  # number of hidden layers
NUM_OUTPUT = 2
ARRAY = [NUM_FEATURE] + NUM_HIDDEN_NODE + [NUM_OUTPUT]
LAYER = len(ARRAY) - 1
GENE_LENGTH = -1
MUTATION_RATE = 0.01
@classmethod
def gene_length(cls):
if 0 <= cls.GENE_LENGTH:
return cls.GENE_LENGTH
length = 0
for index, i in enumerate(cls.ARRAY[0: -1]):
length += i * cls.ARRAY[index + 1]
cls.GENE_LENGTH = length
return cls.GENE_LENGTH
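    # Worked example with the constants above: ARRAY = [6, 2, 2], so the
    # genome length is 6*2 + 2*2 = 16 weights (input->hidden plus hidden->output).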
def __init__(self, mutation_rate=None):
length = self.gene_length()
self._gene = np.random.rand(length).astype(np.float32) - np.random.rand(length).astype(np.float32)
if mutation_rate:
self._mutation_rate = mutation_rate
else:
self._mutation_rate = 1.0 / length
self._fitness = 0.0
def _gene_layer_offset(self, layer):
        # offset (in genes) to the start of the given layer
length = 0
for index, i in enumerate(Genome.ARRAY[0: layer]):
length += i * Genome.ARRAY[index + 1]
return length
def gene_layer(self, layer):
start = self._gene_layer_offset(layer)
end = self._gene_layer_offset(layer + 1)
return self._gene[start:end]
def mutate(self):
length = len(self._gene)
mutate = np.zeros(length).astype(np.float32)
rand = np.random.rand(length)
for i in range(length):
if rand[i] <= self._mutation_rate:
val = np.random.rand() - np.random.rand()
print_debug("mutate[%d] = %f" % (i, val))
mutate[i] = val * MUTATION_BIAS
else:
mutate[i] = 0.0
self._gene += mutate
def copy_gene(self):
# deep copy
return self._gene.copy()
def set_gene(self, gene):
self._gene = gene
def set_fitness(self, fitness):
self._fitness = fitness
def get_fitness(self):
return self._fitness
class NN(object):
def __init__(self, population):
self._x, self._model = self._build_nn(population)
@classmethod
def _flatten(cls, population):
        # Flatten every genome's weights into one array for batched GPU computation.
        # Layout is layer-major: [g0.layer0, g1.layer0, ..., g0.layer1, g1.layer1, ...]
size = len(population)
gene_length = Genome.gene_length()
index = 0
flat = np.zeros(size * gene_length)
for layer in range(Genome.LAYER):
for genome in population:
arr = genome.gene_layer(layer)
end = index + len(arr)
flat[index:end] = arr
index = end
return flat
@classmethod
def _build_nn(cls, population):
size = len(population)
x = tf.placeholder(tf.float32, [size, None, Genome.NUM_FEATURE], name="input")
genes = cls._flatten(population)
print_debug("----genes-----")
print_debug(genes)
start = 0
length = size * Genome.NUM_FEATURE * Genome.NUM_HIDDEN_NODE[0]
c1 = tf.constant(genes[start:length],
dtype=tf.float32,
shape=[size, Genome.NUM_FEATURE, Genome.NUM_HIDDEN_NODE[0]],
name="layer1")
w1 = tf.Variable(c1)
x1 = 2 * tf.nn.sigmoid(tf.matmul(x, w1)) - 1
print_debug("----layer1[{}:{}]----".format(start, length))
print_debug(genes[start:length])
start = length
length = start + size * Genome.NUM_HIDDEN_NODE[0] * Genome.NUM_OUTPUT
c2 = tf.constant(genes[start:length],
dtype=tf.float32,
shape=[size, Genome.NUM_HIDDEN_NODE[0], Genome.NUM_OUTPUT],
name="layer2")
w2 = tf.Variable(c2)
x2 = tf.nn.sigmoid(tf.matmul(x1, w2))
print_debug("----layer2[{}:{}]----".format(start, length))
print_debug(genes[start:length])
return x, x2
def eval(self, input):
fetch = self._model.eval(feed_dict={self._x: input})
return fetch[:, 0]
class GenePool(object):
def __init__(self, size):
        self._generation = 0  # generation counter
self._selection_size = SELECTION_SIZE
self._size = size
self._population = [] # List[Genome]
for i in range(size):
self._population.append(Genome())
self._nn = None
self._shuffle_arr = [i for i in range(self._size)]
@classmethod
def build_filename(cls, prefix):
return prefix + "_p.npy"
def save_population(self, ts):
filename = './data/{}_{}_p.npy'.format(ts, self._generation)
population = [[genome._gene, genome.get_fitness()] for genome in self._population]
np.save(filename, population)
def load_population(self, filename):
arr = np.load(filename)
for index, item in enumerate(arr):
gene, fitness = item
genome = self._population[index] # type Genome
genome.set_gene(gene)
genome.set_fitness(fitness)
def get_genome(self, index):
# type: (int) -> Genome
return self._population[index]
def print_all_genome(self):
for genome in self._population:
print(genome._gene)
def init_world(self):
self._nn = NN(self._population)
def play(self, input):
return self._nn.eval(input=input)
def set_fitness(self, index, fitness):
self._population[index].set_fitness(fitness)
def get_fitness(self, index):
return self._population[index].get_fitness()
def get_elite_index(self):
elite_index = 0
elite_fitness = 0
for index, genome in enumerate(self._population):
fit = genome.get_fitness()
if elite_fitness < fit:
elite_fitness = fit
elite_index = index
return elite_index
def mutation(self, elite_index=-1):
for index, genome in enumerate(self._population):
if index != elite_index:
# print("Mutate[{}]: fitness={}".format(index, genome.get_fitness()))
genome.mutate()
def selection(self):
self._generation += 1
self._tournament_selection()
def _tournament_selection(self):
random.shuffle(self._shuffle_arr)
index_arr = self._shuffle_arr[0: self._selection_size]
# print("tournament_index: {}".format(index_arr))
winner = self._population[index_arr[0]] # type: Genome
losers = [] # List[Genome]
for i in range(1, self._selection_size):
winner_fitness = winner.get_fitness()
challenger_index = index_arr[i]
# print("challenger_index: {}".format(challenger_index))
challenger = self._population[challenger_index] # type: Genome
challenger_fitness = challenger.get_fitness()
if winner_fitness < challenger_fitness:
loser = winner
winner = challenger
losers.append(loser)
else:
losers.append(challenger)
# print("winner:loser = {}:{}".format(winner.get_fitness(), [loser.get_fitness() for loser in losers]))
for loser in losers: # type: Genome
winners_gene = winner.copy_gene()
loser.set_gene(winners_gene)
loser.set_fitness(winner.get_fitness())
return
class World(object):
POSITION_X = 0
POSITION_Y = 1
def __init__(self, id=0):
self._id = id
self._width = WIDTH
self._height = HEIGHT
        self._agent_radius = AGENT_RADIUS  # agent radius
        self._agent_speed = 5
        self._agent_step_theta = AGENT_STEP_THETA  # (rad) maximum rotation angle per step
self._agent_sensor_strength_wall = SENSOR_LENGTH_WALL
self._agent_sensor_strength_food = SENSOR_LENGTH_FOOD
self._agent_sensor_strength_poison = SENSOR_LENGTH_POISON
self._food_point = 10
self._poison_point = -10
self._food_radius = FOOD_RADIUS
self._poison_radius = POISON_RADIUS
def init(self, foods, poisons):
        self._agent_position = [WIDTH/2, HEIGHT/2]  # start position
        self._agent_direction = 0  # heading (rad)
        self._agent_fitness = 0
        self._foods = []  # food positions, e.g. [[1, 1], [1, 2]]
        for food in foods:
            self._foods.append(list(food))
        self._poisons = []  # poison positions, e.g. [[10, 11], [12, 13]]
for poison in poisons:
self._poisons.append(list(poison))
@staticmethod
def meals(num, length):
arr = np.random.rand(num * 2) * length
meals = np.reshape(arr, (num, 2))
return meals.astype(np.int32)
def get_fitness(self):
return self._agent_fitness
def _move_length(self, left, right):
        diff = right - left  # rightward is positive
drive_strength = 1.0 - math.fabs(diff)
move_length = drive_strength * self._agent_speed
return move_length
def _rotate(self, left, right):
        diff = left - right  # rightward is positive
rotate_theta = diff * self._agent_step_theta
return rotate_theta
def move(self, output):
        # the argument is the NN's output pair (left, right)
left, right = output
rotate = self._rotate(left, right)
move_length = self._move_length(left, right)
self._agent_direction += rotate
if math.pi * 2 < self._agent_direction:
self._agent_direction = self._agent_direction - math.pi * 2
elif self._agent_direction < 0:
self._agent_direction = self._agent_direction + math.pi * 2
        diff_x = round(move_length * math.cos(self._agent_direction), 2)  # rounded to 2 decimal places
        diff_y = round(move_length * math.sin(self._agent_direction), 2)  # rounded to 2 decimal places
current_x, current_y = self._agent_position
next_x = current_x + diff_x
next_y = current_y + diff_y
next_x = min(self._width, max(0, next_x))
next_y = min(self._height, max(0, next_y))
self._agent_position[self.POSITION_X] = next_x
self._agent_position[self.POSITION_Y] = next_y
# print("[{}, {}] -> [{}, {}]".format(current_x, current_y, next_x, next_y))
return next_x - current_x, next_y - current_y
def _sensor_diff(self, p1, p2):
p1x = p1[self.POSITION_X]
p1y = p1[self.POSITION_Y]
p2x = p2[self.POSITION_X]
p2y = p2[self.POSITION_Y]
distance = math.sqrt((p2x - p1x)**2 + (p2y - p1y)**2)
radian = math.atan2(p2y - p1y, p2x - p1x)
radian = radian if 0 <= radian else 2 * math.pi + radian
return distance, radian
def _get_min_sensor_diff0(self, target_arr, sensor_length):
distance, radian = min(target_arr, key=lambda x: x[0])
sensor_strength = 0.0
sensor_theta = 0.0
index = -1
if distance < sensor_length:
sensor_strength = (sensor_length - distance) / sensor_length
sensor_theta = radian
for i, item in enumerate(target_arr):
d, r = item
if d == distance and r == radian:
index = i
break
return sensor_strength, sensor_theta, index
def _get_min_sensor_diff(self, target_arr, sensor_length):
distance = 0
radian = 0
index = -1
agent_view_left = self._agent_direction - math.pi / 2
agent_view_right = self._agent_direction + math.pi / 2
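        # Field of view: only targets within +/- 90 degrees of the current heading are sensed.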
for i, item in enumerate(target_arr):
d, r = item
if sensor_length <= d:
continue
if r <= agent_view_left or agent_view_right <= r:
continue
if index < 0:
index = i
distance = (sensor_length - d) / sensor_length
radian = (self._agent_direction - r) / (math.pi / 2)
elif d < distance:
index = i
distance = (sensor_length - d) / sensor_length
radian = (self._agent_direction - r) / (math.pi / 2)
return distance, radian, index
def _collision(self, target_arr, sensor_length):
index = -1
for i, item in enumerate(target_arr):
d, r = item
if sensor_length <= d:
continue
return i
return index
def eat(self, print_log=False):
        # eat any item the agent has collided with
pos = self._agent_position
eaten_food = None
        # contact with food
if 0 < len(self._foods):
eat_area_food = self._agent_radius + self._food_radius
food_diff_arr = [self._sensor_diff(pos, food) for food in self._foods]
findex = self._collision(food_diff_arr, eat_area_food)
if 0 <= findex:
eaten_food = self._foods.pop(findex)
if print_log:
print("eat: food[{}]={}".format(findex, eaten_food))
self._agent_fitness += self._food_point
eaten_poison = None
        # contact with poison
if 0 < len(self._poisons):
eat_area_poison = self._agent_radius + self._food_radius
poison_diff_arr = [self._sensor_diff(pos, poison) for poison in self._poisons]
pindex = self._collision(poison_diff_arr, eat_area_poison)
if 0 <= pindex:
eaten_poison = self._poisons.pop(pindex)
if print_log:
print("eat: poison[{}]={}".format(pindex, eaten_poison))
self._agent_fitness += self._poison_point
return eaten_food, eaten_poison
def sensing(self):
pos = self._agent_position
x = pos[self.POSITION_X]
y = pos[self.POSITION_Y]
        # distance and angle to the walls
wall_diff_arr = [self._sensor_diff(pos, wall) for wall in [[x, 0], [0, y], [x, self._height], [self._width, y]]]
wall_sensor_strength, wall_sensor_theta, _ = self._get_min_sensor_diff(wall_diff_arr,
self._agent_sensor_strength_wall)
        # distance and angle to food
food_sensor_strength = 0.0
food_sensor_theta = 0.0
if 0 < len(self._foods):
food_diff_arr = [self._sensor_diff(pos, food) for food in self._foods]
food_sensor_strength, food_sensor_theta, _ = self._get_min_sensor_diff(food_diff_arr,
self._agent_sensor_strength_food)
        # distance and angle to poison
poison_sensor_strength = 0.0
poison_sensor_theta = INPUT_BIAS # bias
if 0 < len(self._poisons):
poison_diff_arr = [self._sensor_diff(pos, poison) for poison in self._poisons]
poison_sensor_strength, poison_sensor_theta, _ = self._get_min_sensor_diff(poison_diff_arr,
self._agent_sensor_strength_poison)
return np.array([wall_sensor_strength, wall_sensor_theta,
food_sensor_strength, food_sensor_theta,
poison_sensor_strength, poison_sensor_theta]).astype(np.float32)
def save_meal(ts, foods, poisons):
filename = './data/{}_m.npy'.format(ts)
meals = [foods, poisons]
np.save(filename, meals)
def load_meal(filename):
arr = np.load(filename)
foods, poisons = arr
return foods, poisons
def train(gp, generation, size, step):
ts = datetime.datetime.now().strftime("%H%M%S")
num_food = NUM_FOOD
num_poison = NUM_POISON
foods = World.meals(num_food, WIDTH)
poisons = World.meals(num_poison, WIDTH)
save_meal(ts, foods, poisons)
worlds = [World(i) for i in range(size)]
for i in range(generation):
print("# Generation: %d" % i)
if i % 10 == 0:
gp.save_population(ts)
# gp.print_all_genome()
# init world
gp.init_world()
for world in worlds:
world.init(foods.copy(), poisons.copy())
sess = tf.Session()
with sess.as_default():
tf.global_variables_initializer().run()
input = np.zeros(Genome.NUM_FEATURE * size)
for _ in range(step):
move_arr = []
# set input array
start = 0
for world in worlds:
inp = world.sensing()
end = start + len(inp)
input[start:end] = inp
start = end
input_placeholder = np.reshape(input, (size, 1, Genome.NUM_FEATURE))
command = gp.play(input_placeholder)
for index, world in enumerate(worlds):
cmd = command[index]
diff_x, diff_y = world.move(cmd)
move_arr.append([diff_x, diff_y])
world.eat()
# set fitness
for index, world in enumerate(worlds):
fit = world.get_fitness()
print("Genome[{}]: fitness={}".format(index, fit))
gp.set_fitness(index, fit)
gp.selection()
        elite_index = gp.get_elite_index()  # elitism: keep the best genome unmutated
gp.mutation(elite_index=elite_index)
def _draw_circle(c0, food, color="#ffffff"):
x, y = food
x1 = x - FOOD_RADIUS / 2
y1 = y - FOOD_RADIUS / 2
x2 = x + FOOD_RADIUS / 2
y2 = y + FOOD_RADIUS / 2
tag = "food{}".format(food)
c0.create_oval(x1, y1, x2, y2, fill=color, outline=color, tags=tag)
def play(gp, size, step, file, id, meal_file):
foods, poisons = load_meal(meal_file)
# foods = World.meals(NUM_FOOD, WIDTH)
# poisons = World.meals(NUM_POISON, WIDTH)
worlds = [World(i) for i in range(size)]
gp.init_world()
for world in worlds:
world.init(foods.copy(), poisons.copy())
gp.load_population(file)
import Tkinter as tk
c0 = tk.Canvas(width=WIDTH, height=HEIGHT)
c0.pack()
# create agent
agent_tag = 'agent'
x1 = WIDTH / 2 - AGENT_RADIUS / 2
y1 = HEIGHT / 2 - AGENT_RADIUS / 2
x2 = WIDTH / 2 + AGENT_RADIUS / 2
y2 = HEIGHT / 2 + AGENT_RADIUS / 2
c0.create_oval(x1, y1, x2, y2, fill='#ff0000', tags=agent_tag)
# create food
for index, food in enumerate(foods):
x, y = food
x1 = x - FOOD_RADIUS / 2
y1 = y - FOOD_RADIUS / 2
x2 = x + FOOD_RADIUS / 2
y2 = y + FOOD_RADIUS / 2
tag = "food{}".format(food)
print("create food: {}".format(tag))
c0.create_oval(x1, y1, x2, y2, fill='#000000', tags=tag)
sess = tf.Session()
with sess.as_default():
tf.global_variables_initializer().run()
input = np.zeros(Genome.NUM_FEATURE * size)
for _ in range(step):
# set input array
start = 0
            spy_input = None
for idx, world in enumerate(worlds):
inp = world.sensing()
if idx == id:
                    spy_input = inp
end = start + len(inp)
input[start:end] = inp
start = end
input_placeholder = np.reshape(input, (size, 1, Genome.NUM_FEATURE))
command = gp.play(input_placeholder)
cmd = command[id]
x, y = worlds[id].move(cmd)
print("in: {}, out: {}".format(spy_inpu, cmd))
# print("move=[{}, {}]".format(x, y))
food, poison = worlds[id].eat(print_log=True)
if food:
tag = "food{}".format(food)
print("delete:{}".format(tag))
# c0.delete(tag)
_draw_circle(c0, food)
time.sleep(0.1)
c0.move(agent_tag, x, y)
c0.update()
tk.mainloop()
def show(filename):
arr = np.load(filename)
for index, item in enumerate(arr):
print("{}:{}".format(index, list(item)))
tf.app.flags.DEFINE_string("command", "train", "train, play, show")
tf.app.flags.DEFINE_string("file", "./data/population.npy", "Population file")
tf.app.flags.DEFINE_string("meal", "./data/population_m.npy", "Meal file")
tf.app.flags.DEFINE_integer("index", 0, "Agent index")
def main(args):
flags = tf.app.flags.FLAGS
time_start = time.time()
np.random.seed(0)
generation = GENERATION
    step = STEP  # how many steps each individual moves per generation
    size = POPULATION_SIZE  # population size
gp = GenePool(size)
if flags.command == 'train':
train(gp, generation, size, step)
elif flags.command == 'play':
play(gp, size, step, flags.file, flags.index, flags.meal)
elif flags.command == 'show':
show(flags.file)
time_end = time.time()
print("time: {}s".format(time_end - time_start))
if __name__ == '__main__':
tf.app.run()
|
adamrocker/ishinomakihackathon2017
|
al/app/al.py
|
al.py
|
py
| 21,938 |
python
|
en
|
code
| 0 |
github-code
|
50
|
41877022835
|
#Emrich-Micahel Perrier
#Lab 16
from random import randrange
def roll():
num = randrange(1,7)
return num
def main():
ones = 0
twos = 0
threes = 0
    for i in range(30):
num1 = roll()
print(num1, end=" ")
if num1 == 1:
ones += 1
elif num1 == 2:
twos += 1
elif num1 == 3:
threes += 1
print()
print("You have rolled", ones, "ones")
print("You have rolled", twos, "twos")
print("You have rolled", threes, "threes")
def main2():
    counter = [0] * 13  # counter[r] counts rolls summing to r (2..12); indices 0 and 1 stay unused
for i in range(1000):
red = roll()
green = roll()
r = red+green
counter[r] += 1
print(counter)
for i in range(len(counter)):
print(i, "\t",counter[i])
main2()
|
emrichmp/Python-Programs
|
DiceCounter.py
|
DiceCounter.py
|
py
| 801 |
python
|
en
|
code
| 0 |
github-code
|
50
|
29340197096
|
#!/usr/bin/python3
import sys
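# Converts line endings in place: mode 1 rewrites the given file with Unix '\n'
# endings, mode 2 with Windows '\r\n' endings.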
if len(sys.argv) < 3:
    print('Give the txt file name and a number - 1: Windows -> Unix')
    print('2: Unix -> Windows')
    sys.exit(1)
with open(sys.argv[1], 'r') as file_input:
content = file_input.read()
if int(sys.argv[2]) == 1:
with open(sys.argv[1], 'w', newline='\n') as output:
output.write(content)
elif int(sys.argv[2]) == 2:
with open(sys.argv[1], 'w', newline='\r\n') as output:
output.write(content)
|
TryUnder/DeTryRepo
|
University/Developer_Environment/Bash/Python/Zad_8.2.py
|
Zad_8.2.py
|
py
| 499 |
python
|
en
|
code
| 0 |
github-code
|
50
|
71074366556
|
# -*- coding: utf-8 -*-
from environment import GraphicDisplay, Env
class ValueIteration:
def __init__(self, env):
        # store the environment object
self.env = env
        # initialize the value function as a 2D list
self.value_table = [[0.0] * env.width for _ in range(env.height)]
        # discount factor
self.discount_factor = 0.9
    # Value iteration:
    # compute the next value function via the Bellman optimality equation
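    #   V_{k+1}(s) = max_a [ reward(s, a) + discount_factor * V_k(s') ]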
def value_iteration(self):
next_value_table = [[0.0] * self.env.width for _ in
range(self.env.height)]
for state in self.env.get_all_states():
if state == [2, 2]:
next_value_table[state[0]][state[1]] = 0.0
continue
            # empty list of value candidates for this state
value_list = []
            # compute for every possible action
for action in self.env.possible_actions:
next_state = self.env.state_after_action(state, action)
reward = self.env.get_reward(state, action)
next_value = self.get_value(next_state)
value_list.append((reward + self.discount_factor * next_value))
            # take the maximum as the next value for this state
next_value_table[state[0]][state[1]] = round(max(value_list), 2)
self.value_table = next_value_table
    # return the greedy action(s) under the current value function
def get_action(self, state):
action_list = []
max_value = -99999
if state == [2, 2]:
return []
        # For every action, compute the Q-value: reward + (discount factor * next state's value).
        # Return the action(s) with the maximum Q-value (several if tied).
for action in self.env.possible_actions:
next_state = self.env.state_after_action(state, action)
reward = self.env.get_reward(state, action)
next_value = self.get_value(next_state)
value = (reward + self.discount_factor * next_value)
if value > max_value:
action_list.clear()
action_list.append(action)
max_value = value
elif value == max_value:
action_list.append(action)
return action_list
def get_value(self, state):
return round(self.value_table[state[0]][state[1]], 2)
if __name__ == "__main__":
env = Env()
value_iteration = ValueIteration(env)
grid_world = GraphicDisplay(value_iteration)
grid_world.mainloop()
|
rlcode/reinforcement-learning-kr
|
1-grid-world/2-value-iteration/value_iteration.py
|
value_iteration.py
|
py
| 2,586 |
python
|
ko
|
code
| 351 |
github-code
|
50
|
32592412403
|
import sys
def isPrime(n):
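    # Trial division: a composite n must have a divisor <= sqrt(n), so checking up to int(n**0.5) is enough.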
if n==1:
return False
else:
for i in range(2, int(n**0.5)+1):
if n%i == 0:
return False
return True
A,B = map(int,sys.stdin.readline().split())
answer = []
for x in range(A,B+1):
if isPrime(x):
answer.append(x)
for x in answer:
print(x)
|
san9w9n/2020_WINTER_ALGO
|
1929.py
|
1929.py
|
py
| 359 |
python
|
en
|
code
| 0 |
github-code
|
50
|
23751705994
|
import customtkinter as ctk
class LoadingBox(ctk.CTk):
def __init__(self, title: str = "Loading..."):
super().__init__()
self.title(title)
self.geometry("400x150")
self.resizable(False, False)
self.base_frame = ctk.CTkFrame(self)
self.base_frame.pack(fill="both", expand=True, padx=10, pady=10)
self.status_label = ctk.CTkLabel(self.base_frame, text="0%")
self.status_label.pack(fill="both", expand=True, padx=10, pady=10)
self.progress_bar = ctk.CTkProgressBar(self.base_frame)
self.progress_bar.pack(fill="both", expand=True, padx=10, pady=20)
def set_progress(self, progress: float, status: str = ""):
self.progress_bar.set(progress)
self.status_label.configure(text=f"{progress * 100:.0f}% {status}")
def start(self):
self.focus_force()
self.mainloop()
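# Hedged usage sketch (not part of the original file): drive the bar from a Tk
# timer so the mainloop stays responsive while progress updates.
#
#   box = LoadingBox("Importing...")
#   box.after(500, lambda: box.set_progress(0.5, "halfway"))
#   box.start()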
|
Tremirre/CassandraRentalApp
|
rental/ui/loading.py
|
loading.py
|
py
| 887 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18304840633
|
from openerp.osv import fields,osv
from openerp.tools import sql
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
import time
from datetime import datetime, date
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, float_compare
class tms_expense_analysis(osv.osv):
_name = "tms.expense.analysis"
_description = "Travel Expenses Analisys"
_auto = False
_rec_name = 'name'
_columns = {
'driver_helper' : fields.boolean('Driver Helper'),
'office_id' : fields.many2one('tms.office', 'Office', readonly=True),
'name' : fields.char('Name', size=64, readonly=True),
'date' : fields.date('Date', readonly=True),
'year' : fields.char('Year', size=4, readonly=True),
'day' : fields.char('Day', size=128, readonly=True),
'month' : fields.selection([('01',_('January')), ('02',_('February')), ('03',_('March')), ('04',_('April')),
('05',_('May')), ('06',_('June')), ('07',_('July')), ('08',_('August')), ('09',_('September')),
('10',_('October')), ('11',_('November')), ('12',_('December'))], 'Month',readonly=True),
'state' : fields.selection([
('draft', 'Draft'),
('approved', 'Approved'),
('confirmed', 'Confirmed'),
('cancel', 'Cancelled')
], 'State',readonly=True),
'employee_id' : fields.many2one('hr.employee', 'Driver', readonly=True),
'unit_id' : fields.many2one('fleet.vehicle', 'Unit', readonly=True),
        'unit_char' : fields.char('Unit', size=64, readonly=True),
'currency_id' : fields.many2one('res.currency', 'Currency', readonly=True),
'product_id' : fields.many2one('product.product', 'Line', readonly=True),
'expense_line_description' : fields.char('Description', size=256, readonly=True),
# 'travel_id' : fields.many2one('tms.travel', 'Travel', readonly=True),
# 'route_id' : fields.many2one('tms.route', 'Route', readonly=True),
# 'waybill_income' : fields.float('Waybill Amount', digits=(18,2), readonly=True),
# 'travels' : fields.integer('Travels', readonly=True),
'qty' : fields.float('Qty', digits=(18,2), readonly=True),
'price_unit' : fields.float('Price Unit', digits=(18,4), readonly=True),
'subtotal' : fields.float('SubTotal', digits=(18,2), readonly=True),
'operation_id' : fields.many2one('tms.operation', 'Operation', readonly=True),
}
# _order = "office_id, date_order, name"
def init(self, cr):
sql.drop_view_if_exists(cr, 'tms_expense_analysis')
cr.execute ("""
CREATE OR REPLACE VIEW tms_expense_analysis as
select b.id as id,
a.driver_helper,
a.office_id, a.name,
a.date,
to_char(date_trunc('day',a.date), 'YYYY') as year,
to_char(date_trunc('day',a.date), 'MM') as month,
to_char(date_trunc('day',a.date), 'YYYY-MM-DD') as day,
a.state, a.employee_id, a.unit_id, fv.name as unit_char, a.currency_id,
b.product_id, b.name expense_line_description,
b.product_uom_qty qty,
b.price_unit,
b.price_subtotal subtotal,
b.operation_id
from tms_expense a
inner join tms_expense_line b on a.id = b.expense_id
left join fleet_vehicle fv on fv.id=a.unit_id
--inner join tms_travel c on a.id = c.expense_id
where a.state <> 'cancel'
union
select b.id as id,
a.driver_helper,
a.office_id, a.name,
a.date,
to_char(date_trunc('day',a.date), 'YYYY') as year,
to_char(date_trunc('day',a.date), 'MM') as month,
to_char(date_trunc('day',a.date), 'YYYY-MM-DD') as day,
a.state, a.employee_id, a.unit_id, fv.name as unit_char, a.currency_id,
b.product_id, b.name expense_line_description,
b.product_uom_qty qty,
b.price_unit,
b.price_subtotal subtotal,
b.operation_id
from tms_expense a
inner join tms_expense_line b on a.id = b.expense_id
left join fleet_vehicle fv on fv.id=a.unit_id
--inner join tms_travel c on a.id = c.expense2_id
where a.state <> 'cancel'
order by office_id, name, date
;
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jesramirez/tmsv8
|
tms_analysis/tms_expense_analysis.py
|
tms_expense_analysis.py
|
py
| 4,475 |
python
|
en
|
code
| 2 |
github-code
|
50
|
10733485967
|
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
from Image_Event import TakeRating
import getRecomentation as gR
from ClientR import *
root = Tk()
root.geometry('1000x700')
def page1R():
frame_2R.pack_forget()
frame_3R.pack_forget()
frame_4R.pack_forget()
frame_1R.pack()
panel_R.pack()
def page2R():
frame_1R.pack_forget()
frame_3R.pack_forget()
frame_4R.pack_forget()
frame_2R.pack()
panel_R.pack()
def page3R():
frame_1R.pack_forget()
frame_2R.pack_forget()
frame_4R.pack_forget()
frame_3R.pack()
panel_R.pack()
def page4R():
frame_1R.pack_forget()
frame_2R.pack_forget()
frame_3R.pack_forget()
frame_4R.pack()
panel_R.pack()
panel_R = PanedWindow(root, width=200, height=100)
l=Label(panel_R,text="Recommend Based On:")
l.grid(row=0, column=0)
page1btnR = Button(panel_R, text="Like Base", command=page1R)
page1btnR.grid(row=0, column=1)
page2btnR = Button(panel_R, text="SimpleItemCF Base", command=page2R)
page2btnR.grid(row=0, column=2)
page3btnR = Button(panel_R, text="SimpleUserCF Base", command=page3R)
page3btnR.grid(row=0, column=3)
page4btnR = Button(panel_R, text="ContentRecs Base", command=page4R)
page4btnR.grid(row=0, column=4)
out=Button(panel_R, text="logOut", command=lambda:me(root))
out.grid(row=0, column=5)
panel_R.pack()
frame_1R = Frame(root,width=200, height=200 )
rec1=gR.likedbased()
l=Label(frame_1R,text=rec1).pack()
frame_2R = Frame(root,width=200, height=200 )
rec2=gR.SimpleItemCF()
l2=Label(frame_2R,text=rec2).pack()
frame_3R = Frame(root,width=200, height=200 )
rec3=gR.SimpleUserCF()
l3=Label(frame_3R,text=rec3).pack()
rec4=gR.ContentRecs()
frame_4R = Frame(root,width=200, height=200 )
l4=Label(frame_4R,text=rec4).pack()
frame_1R.pack()
frame_1 = Frame(root,width=200, height=200 )
p=ImageTk.PhotoImage(Image.open("img/Toy_Story_poster.png").resize((200, 200), Image.ANTIALIAS))
b11=Button(frame_1, image=p,command=lambda:TakeRating(61))
b11.grid(row=0, column=0)
p2=ImageTk.PhotoImage(Image.open("img/HHW.jpg").resize((200, 200), Image.ANTIALIAS))
b12=Button(frame_1,image=p2,command=lambda:TakeRating(161582))
b12.grid(row=0, column=1)
p3=ImageTk.PhotoImage(Image.open("img/th.jpg").resize((200, 200), Image.ANTIALIAS))
b13=Button(frame_1, image=p3,command=lambda:TakeRating(161155))
b13.grid(row=0, column=2)
p4=ImageTk.PhotoImage(Image.open("img/th (1).jpg").resize((200, 200), Image.ANTIALIAS))
b14=Button(frame_1, image=p4,command=lambda:TakeRating(160567))
b14.grid(row=0, column=3)
p5=ImageTk.PhotoImage(Image.open("img/th (2).jpg").resize((200, 200), Image.ANTIALIAS))
b15=Button(frame_1, image=p5,command=lambda:TakeRating(160438))
b15.grid(row=0, column=4)
p6=ImageTk.PhotoImage(Image.open("img/th (3).jpg").resize((200, 200), Image.ANTIALIAS))
b16=Button(frame_1, image=p6,command=lambda:TakeRating(160080))
b16.grid(row=1, column=0)
p7=ImageTk.PhotoImage(Image.open("img/th (4).jpg").resize((200, 200), Image.ANTIALIAS))
b17=Button(frame_1, image=p7,command=lambda:TakeRating(159858))
b17.grid(row=1, column=1)
p8=ImageTk.PhotoImage(Image.open("img/th (5).jpg").resize((200, 200), Image.ANTIALIAS))
b18=Button(frame_1, image=p8,command=lambda:TakeRating(159690))
b18.grid(row=1, column=2)
p9=ImageTk.PhotoImage(Image.open("img/th (6).jpg").resize((200, 200), Image.ANTIALIAS))
b19=Button(frame_1, image=p9,command=lambda:TakeRating(158956))
b19.grid(row=1, column=3)
p10=ImageTk.PhotoImage(Image.open("img/th (7).jpg").resize((200, 200), Image.ANTIALIAS))
b110=Button(frame_1, image=p10,command=lambda:TakeRating(36))
b110.grid(row=1, column=4)
frame_2 = Frame(root, width=200, height=200 )
p11=ImageTk.PhotoImage(Image.open("img/th (8).jpg").resize((200, 200), Image.ANTIALIAS))
b11=Button(frame_2, image=p11,command=lambda:TakeRating(126))
b11.grid(row=0, column=0)
p12=ImageTk.PhotoImage(Image.open("img/th (9).jpg").resize((200, 200), Image.ANTIALIAS))
b12=Button(frame_2, image=p12,command=lambda:TakeRating(95720))
b12.grid(row=0, column=1)
p13=ImageTk.PhotoImage(Image.open("img/th (10).jpg").resize((200, 200), Image.ANTIALIAS))
b13=Button(frame_2, image=p13,command=lambda:TakeRating(93766))
b13.grid(row=0, column=2)
p14=ImageTk.PhotoImage(Image.open("img/th (11).jpg").resize((200, 200), Image.ANTIALIAS))
b14=Button(frame_2, image=p14,command=lambda:TakeRating(95165))
b14.grid(row=0, column=3)
p15=ImageTk.PhotoImage(Image.open("img/th (12).jpg").resize((200, 200), Image.ANTIALIAS))
b15=Button(frame_2, image=p15,command=lambda:TakeRating(95207))
b15.grid(row=0, column=4)
p16=ImageTk.PhotoImage(Image.open("img/th (13).jpg").resize((200, 200), Image.ANTIALIAS))
b16=Button(frame_2, image=p16,command=lambda:TakeRating(95307))
b16.grid(row=1, column=0)
p17=ImageTk.PhotoImage(Image.open("img/th (14).jpg").resize((200, 200), Image.ANTIALIAS))
b17=Button(frame_2, image=p17,command=lambda:TakeRating(95449))
b17.grid(row=1, column=1)
p18=ImageTk.PhotoImage(Image.open("img/th (15).jpg").resize((200, 200), Image.ANTIALIAS))
b18=Button(frame_2, image=p18,command=lambda:TakeRating(95510))
b18.grid(row=1, column=2)
p19=ImageTk.PhotoImage(Image.open("img/th (16).jpg").resize((200, 200), Image.ANTIALIAS))
b19=Button(frame_2, image=p19,command=lambda:TakeRating(95543))
b19.grid(row=1, column=3)
p20=ImageTk.PhotoImage(Image.open("img/th (17).jpg").resize((200, 200), Image.ANTIALIAS))
b110=Button(frame_2, image=p20,command=lambda:TakeRating(95583))
b110.grid(row=1, column=4)
frame_3 = Frame(root, width=200, height=200 )
p21=ImageTk.PhotoImage(Image.open("img/th (18).jpg").resize((200, 200), Image.ANTIALIAS))
b11=Button(frame_3, image=p21,command=lambda:TakeRating(95744))
b11.grid(row=0, column=0)
p22=ImageTk.PhotoImage(Image.open("img/th (19).jpg").resize((200, 200), Image.ANTIALIAS))
b12=Button(frame_3, image=p22,command=lambda:TakeRating(95965))
b12.grid(row=0, column=1)
p23=ImageTk.PhotoImage(Image.open("img/th (20).jpg").resize((200, 200), Image.ANTIALIAS))
b13=Button(frame_3, image=p23,command=lambda:TakeRating(96079))
b13.grid(row=0, column=2)
p24=ImageTk.PhotoImage(Image.open("img/th (21).jpg").resize((200, 200), Image.ANTIALIAS))
b14=Button(frame_3, image=p24,command=lambda:TakeRating(96373))
b14.grid(row=0, column=3)
p25=ImageTk.PhotoImage(Image.open("img/th (22).jpg").resize((200, 200), Image.ANTIALIAS))
b15=Button(frame_3, image=p25,command=lambda:TakeRating(99145))
b15.grid(row=0, column=4)
p26=ImageTk.PhotoImage(Image.open("img/th (23).jpg").resize((200, 200), Image.ANTIALIAS))
b16=Button(frame_3, image=p26,command=lambda:TakeRating(100083))
b16.grid(row=1, column=0)
p27=ImageTk.PhotoImage(Image.open("img/th (24).jpg").resize((200, 200), Image.ANTIALIAS))
b17=Button(frame_3, image=p27,command=lambda:TakeRating(100383))
b17.grid(row=1, column=1)
p28=ImageTk.PhotoImage(Image.open("img/th (25).jpg").resize((200, 200), Image.ANTIALIAS))
b18=Button(frame_3, image=p28,command=lambda:TakeRating(104218))
b18.grid(row=1, column=2)
p29=ImageTk.PhotoImage(Image.open("img/th (26).jpg").resize((200, 200), Image.ANTIALIAS))
b19=Button(frame_3, image=p29,command=lambda:TakeRating(106920))
b19.grid(row=1, column=3)
p30=ImageTk.PhotoImage(Image.open("img/th (27).jpg").resize((200, 200), Image.ANTIALIAS))
b110=Button(frame_3, image=p30,command=lambda:TakeRating(108715))
b110.grid(row=1, column=4)
frame_4 = Frame(root, width=200, height=200 )
p31=ImageTk.PhotoImage(Image.open("img/th (28).jpg").resize((200, 200), Image.ANTIALIAS))
b11=Button(frame_4, image=p31,command=lambda:TakeRating(109673))
b11.grid(row=0, column=0)
p32=ImageTk.PhotoImage(Image.open("img/th (29).jpg").resize((200, 200), Image.ANTIALIAS))
b12=Button(frame_4, image=p32,command=lambda:TakeRating(110102))
b12.grid(row=0, column=1)
p33=ImageTk.PhotoImage(Image.open("img/th (30).jpg").resize((200, 200), Image.ANTIALIAS))
b13=Button(frame_4, image=p33,command=lambda:TakeRating(111781))
b13.grid(row=0, column=2)
p34=ImageTk.PhotoImage(Image.open("img/th (31).jpg").resize((200, 200), Image.ANTIALIAS))
b14=Button(frame_4, image=p34,command=lambda:TakeRating(112112))
b14.grid(row=0, column=3)
p35=ImageTk.PhotoImage(Image.open("img/th (32).jpg").resize((200, 200), Image.ANTIALIAS))
b15=Button(frame_4, image=p35,command=lambda:TakeRating(112850))
b15.grid(row=0, column=4)
p36=ImageTk.PhotoImage(Image.open("img/th (33).jpg").resize((200, 200), Image.ANTIALIAS))
b16=Button(frame_4, image=p36,command=lambda:TakeRating(112852))
b16.grid(row=1, column=0)
p37=ImageTk.PhotoImage(Image.open("img/th (34).jpg").resize((200, 200), Image.ANTIALIAS))
b17=Button(frame_4, image=p37,command=lambda:TakeRating(113225))
b17.grid(row=1, column=1)
p38=ImageTk.PhotoImage(Image.open("img/th (35).jpg").resize((200, 200), Image.ANTIALIAS))
b18=Button(frame_4, image=p38,command=lambda:TakeRating(113829))
b18.grid(row=1, column=2)
p39=ImageTk.PhotoImage(Image.open("img/th (36).jpg").resize((200, 200), Image.ANTIALIAS))
b19=Button(frame_4, image=p39,command=lambda:TakeRating(114662))
b19.grid(row=1, column=3)
p40=ImageTk.PhotoImage(Image.open("img/th (37).jpg").resize((200, 200), Image.ANTIALIAS))
b110=Button(frame_4, image=p40,command=lambda:TakeRating(114762))
b110.grid(row=1, column=4)
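# Added sketch (not in the original file): the four poster grids above repeat the
# same pattern, so they could be built with one helper. `items` is a list of
# (image_path, movie_id) pairs. The default argument m=movie_id pins each lambda
# to its own id, and the returned `photos` list must be kept referenced, or
# Tkinter garbage-collects the images.
def build_poster_frame(parent, items):
    frame = Frame(parent, width=200, height=200)
    photos = []
    for idx, (path, movie_id) in enumerate(items):
        img = ImageTk.PhotoImage(Image.open(path).resize((200, 200), Image.ANTIALIAS))
        photos.append(img)
        btn = Button(frame, image=img, command=lambda m=movie_id: TakeRating(m))
        btn.grid(row=idx // 5, column=idx % 5)
    return frame, photos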
def page1():
frame_2.pack_forget()
frame_3.pack_forget()
frame_4.pack_forget()
frame_1.pack()
panel_R.pack()
def page2():
frame_1.pack_forget()
frame_3.pack_forget()
frame_4.pack_forget()
frame_2.pack()
panel_R.pack()
def page3():
frame_2.pack_forget()
frame_1.pack_forget()
frame_4.pack_forget()
frame_3.pack()
panel_R.pack()
def page4():
frame_2.pack_forget()
frame_3.pack_forget()
frame_1.pack_forget()
frame_4.pack()
panel_R.pack()
panel_ = PanedWindow(root, width=200, height=100)
page1btn = Button(panel_, text="Page 1", command=page1)
page1btn.grid(row=0, column=0)
page2btn = Button(panel_, text="Page 2", command=page2)
page2btn.grid(row=0, column=1)
page3btn = Button(panel_, text="Page 3", command=page3)
page3btn.grid(row=0, column=2)
page4btn = Button(panel_, text="Page 4", command=page4)
page4btn.grid(row=0, column=3)
panel_.pack()
frame_1.pack()
root.mainloop()
|
neerajrp1999/Movie-App-Including-recommender-system
|
Login/ClienPage/Home.py
|
Home.py
|
py
| 10,394 |
python
|
en
|
code
| 0 |
github-code
|
50
|
3210927870
|
import unittest
class MajorityElement(unittest.TestCase):
"""
Given an array nums of size n, return the majority element.
The majority element is the element that appears more than ⌊n / 2⌋ times.
You may assume that the majority element always exists in the array.
"""
def majority_element(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# alt solution
# nums.sort()
# return nums[len(nums)//2]
majority = len(nums) / 2
dic = {}
for n in nums:
dic[n] = dic.get(n, 0) + 1
if dic[n] > majority:
return n
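
    # Added sketch (not in the original file): Boyer-Moore majority vote runs in
    # O(n) time and O(1) extra space; it relies on the guarantee that a majority
    # element always exists.
    def majority_element_bm(self, nums):
        count, candidate = 0, None
        for n in nums:
            if count == 0:
                candidate = n
            count += 1 if n == candidate else -1
        return candidate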
def test_majority(self):
nums1 = [3,2,3]
nums2 = [2,2,1,1,1,2,2]
self.assertEqual(self.majority_element(nums1), 3)
self.assertEqual(self.majority_element(nums2), 2)
|
EugeneStill/PythonCodeChallenges
|
majority_element.py
|
majority_element.py
|
py
| 863 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8542534731
|
from COCODataUtility import COCODataCategories, COCODataImage, COCODataAnnotation, COCODataWriter
categories = COCODataCategories()
categories.add_category("Cabinet_Handle")
categories.add_category("Cabinet_Door")
data_writer = COCODataWriter(categories)
image = COCODataImage(360, 640, 'angle13_Color.png')
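# Each segmentation below is a flat [x1, y1, x2, y2, ...] polygon in pixel
# coordinates (the standard COCO convention, which this utility appears to follow).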
segmentation = [221,70,241,72,239,76,232,78,236,107,245,108,244,114,236,116,228,115,228,110,233,109,228,77,221,78]
annotation = COCODataAnnotation(False, 'angle13_Color.png', segmentation, 'Cabinet_Handle')
data_writer.add_image(image)
data_writer.add_annotation(annotation)
segmentation = [209,25,415,17,402,139,224,151]
annotation = COCODataAnnotation(False, 'angle13_Color.png', segmentation, 'Cabinet_Door')
data_writer.add_annotation(annotation)
segmentation = [232,151,402,147,392,239,237,231]
annotation = COCODataAnnotation(False, 'angle13_Color.png', segmentation, 'Cabinet_Door')
data_writer.add_annotation(annotation)
image = COCODataImage(360, 640, 'angle10_Color.png')
segmentation = [229,82,247,80,249,86,244,87,247,94,250,119,253,123,250,129,244,129,237,129,234,126,237,121,246,120,243,98,241,92,237,88,232,90,228,87]
annotation = COCODataAnnotation(False, 'angle10_Color.png', segmentation, 'Cabinet_Handle')
data_writer.add_image(image)
data_writer.add_annotation(annotation)
segmentation = [215,35,406,14,393,131,231,169]
annotation = COCODataAnnotation(False, 'angle10_Color.png', segmentation, 'Cabinet_Door')
data_writer.add_annotation(annotation)
segmentation = [221,152,393,139,385,230,232,246]
annotation = COCODataAnnotation(False, 'angle10_Color.png', segmentation, 'Cabinet_Door')
data_writer.add_annotation(annotation)
data_writer.write_data('output.json')
|
JanusMaple/COCOData_Writer
|
COCODataUtility_Demo.py
|
COCODataUtility_Demo.py
|
py
| 1,701 |
python
|
en
|
code
| 2 |
github-code
|
50
|
32752491690
|
import json
from pprint import pprint
from bs4 import BeautifulSoup
# Read database data - after it has been encoded in json
json_data = open('db.json')
data = json.load(json_data)[0]['hubot:storage']
macros = json.loads(data)['macros']
json_data.close()
# Sort the data alphabetically
macros = sorted(macros, key=lambda k: k['macro'])
index = 0
count = 1
p_num = 1
while index < len(macros):
# Generate Page Header
page = '<!-- THIS FILE WAS AUTO-GENERATED - DO NOT EDIT THIS FILE -->'
page = page + '<div class="container-2 col-lg-12">';
# Add the next 16 macros to the page
while count <= 16 and index < len(macros):
page = page + '<div class="item col-md-3"><img class="img-responsive" src="' + macros[index]['url'] + '"/><div class="col-md-3 item-title">' + macros[index]['macro'] + '</div></div>'
count = count + 1
index = index + 1
page = page + '</div>'
# Write the html page
    soup = BeautifulSoup(page, 'html.parser')
fo = open('www/gen/page_' + str(p_num) + '.html', "w+")
fo.write(soup.prettify())
fo.close()
# Increment Variables for next page
p_num = p_num + 1
count = 1
# Build the paginator
with open('www/index.html', 'r') as myfile:
home = myfile.read()
home_page = BeautifulSoup(home, 'html.parser')
paginator = home_page.find('ul', {"class":"pagination"})
paginator.clear()
paginator.append('<li id="back" data-min="1"><a href="#">«</a></li>')
paginator.append('<li id="1" class="p-btn active"><a href="#">1</a></li>')
cur_page = 2
while cur_page < p_num:
paginator.append('<li id="' + str(cur_page) + '" class="p-btn"><a href="#">' + str(cur_page) + '</a></li>')
    cur_page = cur_page + 1
paginator.append('<li id="next" data-max="' + str(p_num - 1) + '">' + '<a href="#">»</a></li>')
# Write new paginator
fo = open('www/index.html', "w+")
fo.write(home_page.prettify(formatter=None))
fo.close()
|
ericluii/hubot-webserver
|
macro_html_gen.py
|
macro_html_gen.py
|
py
| 1,854 |
python
|
en
|
code
| 1 |
github-code
|
50
|
26584824757
|
from confluent_kafka import Consumer, Message
from django.conf import settings
KAFKA_RUNNING: bool = True
def kafka_consumer_run() -> None:
conf = {
"bootstrap.servers": settings.KAFKA_BOOTSTRAP_SERVER,
"group.id": settings.KAFKA_GROUP_ID,
"auto.offset.reset": settings.KAFKA_OFFSET_RESET if hasattr(settings, "KAFKA_OFFSET_RESET") else "earliest",
}
consumer: Consumer = Consumer(conf)
topics: list[str] = [key for key, _ in settings.KAFKA_TOPICS.items()]
consumer.subscribe(topics)
try:
while KAFKA_RUNNING:
msg: Message = consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print("Consumer error: {}".format(msg.error()))
continue
callback: str = settings.KAFKA_TOPICS.get(msg.topic())
if callback is None:
print("No callback found for topic: {}".format(msg.topic()))
continue
# call the callback string as function
dynamic_call_action(callback, consumer, msg)
except Exception as e:
print(e)
finally:
consumer.close()
def kafka_consumer_shutdown() -> None:
global KAFKA_RUNNING
KAFKA_RUNNING = False
def dynamic_call_action(action: str, consumer: Consumer, msg: Message) -> None:
    # module path: everything before the last dot
    module_path: str = ".".join(action.split(".")[:-1])
    # function name: the part after the last dot
    function_name: str = action.split(".")[-1]
# import module
try:
module = __import__(module_path, fromlist=[function_name])
    except ImportError:
print("No module found for action: {}".format(action))
return
# get function from module
try:
function = getattr(module, function_name)
    except AttributeError:
print("No function found for action: {}".format(action))
return
# call function
try:
function(consumer=consumer, msg=msg)
    except Exception as e:
        print("Error calling action {}: {}".format(action, e))
return
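# Hedged example of the Django settings this consumer reads (the setting names
# come from the code above; the values are illustrative only):
#
#   KAFKA_BOOTSTRAP_SERVER = "localhost:9092"
#   KAFKA_GROUP_ID = "django-consumers"
#   KAFKA_OFFSET_RESET = "earliest"
#   KAFKA_TOPICS = {"my-topic": "myapp.handlers.handle_message"}  # topic -> dotted callback path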
|
luizSilva976/django_kafka
|
django_kafka/consumer.py
|
consumer.py
|
py
| 2,091 |
python
|
en
|
code
| null |
github-code
|
50
|
11053824580
|
from odoo.tests.common import TransactionCase
class TestSaleProject(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.analytic_account_sale = cls.env['account.analytic.account'].create({
'name': 'Project for selling timesheet - AA',
'code': 'AA-2030'
})
# Create projects
cls.project_global = cls.env['project.project'].create({
'name': 'Global Project',
'analytic_account_id': cls.analytic_account_sale.id,
})
cls.project_template = cls.env['project.project'].create({
'name': 'Project TEMPLATE for services',
})
cls.project_template_state = cls.env['project.task.type'].create({
'name': 'Only stage in project template',
'sequence': 1,
'project_ids': [(4, cls.project_template.id)]
})
# Create service products
uom_hour = cls.env.ref('uom.product_uom_hour')
cls.product_order_service1 = cls.env['product.product'].create({
'name': "Service Ordered, create no task",
'standard_price': 11,
'list_price': 13,
'type': 'service',
'invoice_policy': 'order',
'uom_id': uom_hour.id,
'uom_po_id': uom_hour.id,
'default_code': 'SERV-ORDERED1',
'service_tracking': 'no',
'project_id': False,
})
cls.product_order_service2 = cls.env['product.product'].create({
'name': "Service Ordered, create task in global project",
'standard_price': 30,
'list_price': 90,
'type': 'service',
'invoice_policy': 'order',
'uom_id': uom_hour.id,
'uom_po_id': uom_hour.id,
'default_code': 'SERV-ORDERED2',
'service_tracking': 'task_global_project',
'project_id': cls.project_global.id,
})
cls.product_order_service3 = cls.env['product.product'].create({
'name': "Service Ordered, create task in new project",
'standard_price': 10,
'list_price': 20,
'type': 'service',
'invoice_policy': 'order',
'uom_id': uom_hour.id,
'uom_po_id': uom_hour.id,
'default_code': 'SERV-ORDERED3',
'service_tracking': 'task_in_project',
'project_id': False, # will create a project
})
cls.product_order_service4 = cls.env['product.product'].create({
'name': "Service Ordered, create project only",
'standard_price': 15,
'list_price': 30,
'type': 'service',
'invoice_policy': 'order',
'uom_id': uom_hour.id,
'uom_po_id': uom_hour.id,
'default_code': 'SERV-ORDERED4',
'service_tracking': 'project_only',
'project_id': False,
})
def test_sale_order_with_project_task(self):
SaleOrder = self.env['sale.order'].with_context(tracking_disable=True)
SaleOrderLine = self.env['sale.order.line'].with_context(tracking_disable=True)
partner = self.env['res.partner'].create({'name': "Mur en béton"})
sale_order = SaleOrder.create({
'partner_id': partner.id,
'partner_invoice_id': partner.id,
'partner_shipping_id': partner.id,
})
so_line_order_no_task = SaleOrderLine.create({
'name': self.product_order_service1.name,
'product_id': self.product_order_service1.id,
'product_uom_qty': 10,
'product_uom': self.product_order_service1.uom_id.id,
'price_unit': self.product_order_service1.list_price,
'order_id': sale_order.id,
})
so_line_order_task_in_global = SaleOrderLine.create({
'name': self.product_order_service2.name,
'product_id': self.product_order_service2.id,
'product_uom_qty': 10,
'product_uom': self.product_order_service2.uom_id.id,
'price_unit': self.product_order_service2.list_price,
'order_id': sale_order.id,
})
so_line_order_new_task_new_project = SaleOrderLine.create({
'name': self.product_order_service3.name,
'product_id': self.product_order_service3.id,
'product_uom_qty': 10,
'product_uom': self.product_order_service3.uom_id.id,
'price_unit': self.product_order_service3.list_price,
'order_id': sale_order.id,
})
so_line_order_only_project = SaleOrderLine.create({
'name': self.product_order_service4.name,
'product_id': self.product_order_service4.id,
'product_uom_qty': 10,
'product_uom': self.product_order_service4.uom_id.id,
'price_unit': self.product_order_service4.list_price,
'order_id': sale_order.id,
})
sale_order.action_confirm()
# service_tracking 'no'
self.assertFalse(so_line_order_no_task.project_id, "The project should not be linked to no task product")
self.assertFalse(so_line_order_no_task.task_id, "The task should not be linked to no task product")
# service_tracking 'task_global_project'
self.assertFalse(so_line_order_task_in_global.project_id, "Only task should be created, project should not be linked")
self.assertEqual(self.project_global.tasks.sale_line_id, so_line_order_task_in_global, "Global project's task should be linked to so line")
# service_tracking 'task_in_project'
self.assertTrue(so_line_order_new_task_new_project.project_id, "Sales order line should be linked to newly created project")
self.assertTrue(so_line_order_new_task_new_project.task_id, "Sales order line should be linked to newly created task")
# service_tracking 'project_only'
self.assertFalse(so_line_order_only_project.task_id, "Task should not be created")
self.assertTrue(so_line_order_only_project.project_id, "Sales order line should be linked to newly created project")
        self.assertEqual(self.project_global._get_sale_order_items(), self.project_global.sale_line_id | self.project_global.tasks.sale_line_id, '_get_sale_order_items should return all the SOLs linked to the project and its active tasks.')
sale_order_2 = SaleOrder.create({
'partner_id': partner.id,
'partner_invoice_id': partner.id,
'partner_shipping_id': partner.id,
})
sale_line_1_order_2 = SaleOrderLine.create({
'product_id': self.product_order_service1.id,
'product_uom_qty': 10,
'product_uom': self.product_order_service1.uom_id.id,
'price_unit': self.product_order_service1.list_price,
'order_id': sale_order_2.id,
})
task = self.env['project.task'].create({
'name': 'Task',
'sale_line_id': sale_line_1_order_2.id,
'project_id': self.project_global.id,
})
self.assertEqual(task.sale_line_id, sale_line_1_order_2)
self.assertIn(task.sale_line_id, self.project_global._get_sale_order_items())
self.assertEqual(self.project_global._get_sale_orders(), sale_order | sale_order_2)
def test_sol_product_type_update(self):
partner = self.env['res.partner'].create({'name': "Mur en brique"})
sale_order = self.env['sale.order'].with_context(tracking_disable=True).create({
'partner_id': partner.id,
'partner_invoice_id': partner.id,
'partner_shipping_id': partner.id,
})
self.product_order_service3.type = 'consu'
sale_order_line = self.env['sale.order.line'].create({
'order_id': sale_order.id,
'name': self.product_order_service3.name,
'product_id': self.product_order_service3.id,
'product_uom_qty': 5,
'product_uom': self.product_order_service3.uom_id.id,
'price_unit': self.product_order_service3.list_price
})
self.assertFalse(sale_order_line.is_service, "As the product is consumable, the SOL should not be a service")
self.product_order_service3.type = 'service'
self.assertTrue(sale_order_line.is_service, "As the product is a service, the SOL should be a service")
|
anhjean/beanbakery_v15
|
addons/sale_project/tests/test_sale_project.py
|
test_sale_project.py
|
py
| 8,446 |
python
|
en
|
code
| 5 |
github-code
|
50
|
22331897062
|
# mathesis.cup.gr course with title "Introduction to Python"
# Project: Tic Tac Toe
import random
import time
marker = {'Player 1': 'X', 'Player 2': 'O', }
def display_board(board):
    # it prints the tic-tac-toe board's state
cell = 0
for i in range(3):
firstLine = '+'
for j in range(53):
firstLine += '-'
firstLine += '+'
secondLine = ''
for j in range(3):
cell += 1
secondLine += '|' + str(cell)
for k in range(15):
secondLine += ' '
secondLine += '|'
thirdLine = ''
for j in range(3):
thirdLine += '|'
for k in range(8):
thirdLine += ' '
            c = 3 * (i - 1) + j - 6  # maps (i, j) to a negative index in -9..-1, i.e. board[1..9]
thirdLine += board[c]
for k in range(7):
thirdLine += ' '
thirdLine += '|'
fourthLine = ''
for j in range(3):
fourthLine += '|'
for k in range(16):
fourthLine += ' '
fourthLine += '|'
fifthLine = '\n+'
for j in range(53):
fifthLine += '-'
fifthLine += '+\n'
print(firstLine, '\n', secondLine, '\n', thirdLine, '\n', fourthLine, fifthLine)
def choose_first():
    # it randomly draws which player is going to play first
# it returns 'Player 1' or 'Player 2'
player = random.randint(1, 2)
return 'Player ' + str(player)
def display_score(score):
# it prints the final score
print('FINAL SCORE\nPlayer 1: {}\nPlayer 2: {}'.format(score.get('Player 1', 0), score.get('Player 2', 0)))
def place_marker(board, marker, position):
# it places the variable marker into board's position
board[position] = marker
def win_check(board,mark):
# it returns True if symbol mark has formed a tic tac toe
return (board[1] == mark and board[5] == mark and board[9] == mark) or \
(board[3] == mark and board[5] == mark and board[7] == mark) or \
(board[2] == mark and board[5] == mark and board[8] == mark) or \
(board[4] == mark and board[5] == mark and board[6] == mark) or \
(board[7] == mark and board[8] == mark and board[9] == mark) or \
(board[1] == mark and board[2] == mark and board[3] == mark) or \
(board[1] == mark and board[4] == mark and board[7] == mark) or \
(board[3] == mark and board[6] == mark and board[9] == mark)
def board_check(board):
# it returns False if there are still empty squares
# and True in the opposite case.
board[0] = '0'
for b in board:
if b == ' ':
return False
return True
def player_choice(board, turn):
    # The player represented by `turn` chooses a square
    # It returns an integer in the range [1, 9]
    # It also checks whether the chosen square is already occupied
while True:
number = input(turn + '[ ' + marker[turn] + ' ]: Choose a square: (1-9) ')
        # check that the input is an integer
try:
numberInt = int(number)
except:
continue
else:
            # check that it is within the allowed range
if numberInt < 1 or numberInt > 9:
continue
number = int(number)
            # finally, check that the corresponding cell is empty
if board[number] == ' ':
return number
else:
print('The chosen square is occupied')
def replay():
    # asks the user whether they want to play again; returns True if so
while True:
ans = input('Do you want to play again? (Yes/No)').lower().strip()
if ans == 'yes':
return True
elif ans == 'no':
return False
def next_player(turn):
# it returns the next player that plays
split = turn.split()
if split[1] == '1':
return split[0] + ' 2'
return split[0] + ' 1'
def main():
score = {} # a dictionary with the players' score
    print('Let\'s start!\nDrawing lots ', end = '')
    for t in range(10):
        print(".", flush=True, end=' ')
time.sleep(0.2)
print()
    # variable turn refers to the player whose move it is
turn = choose_first()
print("\n" + turn + ' plays first.')
    # variable first refers to the player who played first
first = turn
game_round = 1 # game round
while True:
# new game
theBoard = [' '] * 10
game_on = True # game starts
while game_on:
display_board(theBoard) # display tic tac toe
# player turn chooses a position
position = player_choice(theBoard, turn)
# is placed his choice
place_marker(theBoard, marker[turn], position)
if win_check(theBoard, marker[turn]): # a check if he has won
display_board(theBoard)
print(turn + ' won')
score[turn] = score.get(turn, 0) + 1
game_on = False
            # check whether the board has filled up without a winner
elif board_check(theBoard):
display_board(theBoard)
print('Draw!')
game_on = False
else: # else we continue with next player's move
turn = next_player(turn)
if not replay():
ending = ''
            if game_round > 1: ending = 's'
print("After {} round{}".format(game_round, ending))
display_score(score) # exit ... final score
break
else :
game_round += 1
# in next game the other player begins
turn = next_player(first)
first = turn
main()
|
theomeli/Mathesis-apps
|
tic tac toe/tic_tac_toe.py
|
tic_tac_toe.py
|
py
| 5,744 |
python
|
en
|
code
| 0 |
github-code
|
50
|
27275885645
|
import json
def get_command_help_string(serverid, userlevel, commandname):
with open('servers.json', 'r') as f:
servers = json.load(f)
servername = servers[f'sid{serverid}']['servername']
disabledcommands = servers[f'sid{serverid}']['disabledcommands']
try:
customcommands = servers[f'sid{serverid}']['customcommands']
except KeyError:
customcommands = []
prefix = servers[f'sid{serverid}']['prefix']
# 0 = Everyone
# 1 = Mod
# 2 = Admin
# 3 = Server Owner
# 4 = Bot Owner
if commandname == 'setprefix':
messagestr = f'`{prefix}setprefix [prefix] <server|default>`: ' + \
'Changes the bot command prefix. (userlevel: 2)\n' + \
'`[prefix]`: What to change the prefix to.\n' + \
'`<server|default>`: Specify whether or not to change the ' + \
'server\'s command prefix, or the default prefix. If omitted, ' + \
'defaults to `server`. (userlevel: 4)'
elif commandname == 'setulrolenames':
messagestr = f'`{prefix}setulrolenames [modrole] <adminrole>`: ' + \
'Changes the moderator/admin role names. (userlevel: 2)\n' + \
'`[modrole]`: The moderator rolename.\n' + \
'`<adminrole>`: The admin role name. If omitted, ' + \
'defaults to whatever the current admin role name is.'
elif commandname == 'addquote':
messagestr = f'`{prefix}addquote [quote ... ]`: ' + \
'Adds a quote to the list. (userlevel: 1)\n' + \
'`[quote ... ]`: The quote to add.'
elif commandname == 'delquote':
messagestr = f'`{prefix}delquote [index|all]`: ' + \
'Removes a quote from the list. (userlevel: 1)\n' + \
            '`[index|all]`: Either a number corresponding to the index ' + \
'of the quote to be removed, or `all` (which deletes all quotes). '
elif commandname == 'quote':
messagestr = f'`{prefix}quote <index|list>`: Prints a quote from the list. (userlevel: 0)\n' + \
            '`<index|list>`: Either a number corresponding to the index of ' + \
            'the quote to be printed, or `list` (which PMs the user the quote list). ' + \
            'If omitted, chooses a random quote.'
elif commandname == '8ball':
messagestr = f'`{prefix}8ball [question ... ]`: ' + \
'Prints out a random Magic 8-Ball response. (userlevel: 0)\n' + \
'`[question ... ]`: The question to ask the Magic 8-Ball.'
elif commandname == 'help':
messagestr = f'`{prefix}help <command>`: ' + \
'PMs the user information about the commands this bot supports. (userlevel: 0)\n' + \
            '`<command>`: A command to view information about. If omitted, ' + \
'PMs the user a list of commands that they can use.'
elif commandname == 'toggle':
messagestr = f'`{prefix}toggle [command]`: ' + \
'Toggles on/off the specified command on the server. (userlevel: 2)\n' + \
'`[command]`: The command to toggle, without the prefix.'
elif commandname == 'addcom':
        messagestr = f'`{prefix}addcom simple [name] [userlevel] [reply-in-pm] [content ... ]`: ' + \
'Adds a simple custom command to the server. (userlevel: 2)\n' + \
'`[name]`: The name of the command, without prefix.\n' + \
            '`[userlevel]`: An integer corresponding to the minimum userlevel ' + \
'required to use the command. `0` for everyone, `1` for mod, `2` for admin, ' + \
'`3` for server owner, and `4` for bot owner.\n' + \
'`[reply-in-pm]`: Either `1` or `0`. If `1`, the command will reply to the user ' + \
'in a PM rather than in the channel the command was used.\n' + \
'`[content ... ]`: The content the command will print when used.\n\n'
messagestr += f'`{prefix}addcom quotesys [name] [userlevel] [addcomname] ' + \
'[addcomuserlevel] [delcomname] [delcomuserlevel]`: ' + \
'Adds a custom quote system to the server. (userlevel: 2)\n' + \
'`[name]`: The name of the quote system, without command prefix.\n' + \
'`[userlevel]`: The minimum userlevel required to use the quote command.\n' + \
            '`[addcomname]`: The name of the addquote command, without prefix.\n' + \
'`[addcomuserlevel]`: The minimum userlevel required to use the addquote command.\n' + \
            '`[delcomname]`: The name of the delquote command, without prefix.\n' + \
'`[delcomuserlevel]`: The minimum userlevel required to use the delquote command.\n\n'
messagestr += f'`{prefix}addcom quote [name] [userlevel]`: ' + \
'Adds a custom quote system to the server without adding the ' + \
'addquote and delquote commands. (userlevel: 2)\n' + \
'`[name]`: The name of the quote system, without command prefix.\n' + \
            '`[userlevel]`: The minimum userlevel required to use the command.\n\n'
messagestr += f'`{prefix}addcom addquote [name] [userlevel] [quotesys]`: ' + \
'Adds an addquote command for a custom quote system. (userlevel: 2)\n' + \
'`[name]`: The name of the command, without prefix.\n' + \
            '`[userlevel]`: The minimum userlevel required to use the command.\n' + \
'`[quotesys]`: The name of the custom quote system this command will edit.\n\n'
messagestr += f'`{prefix}addcom delquote [name] [userlevel] [quotesys]`: ' + \
'Adds an delquote command for a custom quote system. (userlevel: 2)\n' + \
'`[name]`: The name of the command, without prefix.\n' + \
            '`[userlevel]`: The minimum userlevel required to use the command.\n' + \
'`[quotesys]`: The name of the custom quote system this command will edit.'
elif commandname == 'delcom':
messagestr = f'`{prefix}delcom [command]`: ' + \
'Removes a custom command from the server. (userlevel: 2)\n' + \
'`[command]`: The command to remove, without the prefix.'
elif commandname == 'test':
messagestr = f'`{prefix}test <args ... >`: Prints the arguments specified. (userlevel: 0)\n' + \
'`<args ... >`: The args to print.'
elif commandname == 'tf':
messagestr = f'`{prefix}tf`: Flip some tables. (╯°□°)╯︵ ┻━┻ (userlevel: 0)'
elif commandname == 'eval':
        messagestr = f'`{prefix}eval [expression ... ]`: ' + \
            'Takes the provided Python expression, `eval`s it, and shows the output. ' + \
            '(userlevel: 4)\n' + \
            '`[expression ... ]`: The expression to evaluate.'
elif commandname == 'exec':
        messagestr = f'`{prefix}exec [code ... ]`: ' + \
            'Takes the provided Python code, `exec`s it, and shows the output. (userlevel: 4)\n' + \
            '`[code ... ]`: The code to execute.'
elif commandname == 'userlevel':
messagestr = f'`{prefix}userlevel`: Shows your userlevel. (userlevel: 0)\n'
elif commandname == 'stats':
messagestr = f'`{prefix}stats`: Shows some stats about the bot. (userlevel: 0)\n' + \
'The stats shown: how many servers the bot is in, how many users ' + \
'are online, how many times bot commands have been used, and the bot uptime.'
elif commandname == 'src':
        messagestr = f'`{prefix}src [game] [category ... ]`: Gets the speedrun.com WR for a given game ' + \
'and category. (userlevel: 0)\n' + \
'`[game]`: The game to get the WR for.\n' + \
'`[category ... ]`: The category to get the WR for.\n' + \
'Note that the category names are case-sensitive.'
    elif commandname is not None:
        messagestr = 'There\'s no help information available for that command. Either the command ' + \
            'just plain doesn\'t exist, or it\'s a server-specific custom command.'
    elif commandname is None:
messagestr = f'**Unobtainibot commands available to you in {servername}**\n' + \
f'For more information on these commands, use `{prefix}help <command>`\n\n'
if userlevel >= 4:
messagestr += f'`[4] {prefix}eval`: Takes the provided Python expression and `eval`s it.\n'
messagestr += f'`[4] {prefix}exec`: Takes the provided Python code, and `exec`s it.\n'
if userlevel >= 2:
messagestr += f'`[2] {prefix}changeprefix`: Changes the bot command prefix.\n'
messagestr += f'`[2] {prefix}setulrolenames`: Changes the admin/mod role names.\n'
messagestr += f'`[2] {prefix}toggle`: Toggles a command on or off.\n'
messagestr += f'`[2] {prefix}addcom`: Adds a custom command to the server.\n'
messagestr += f'`[2] {prefix}delcom`: Removes a custom command from the server.\n'
if userlevel >= 1:
messagestr += f'`[1] {prefix}addquote`: Adds a quote to the quote list.\n'
messagestr += f'`[1] {prefix}delquote`: Removes a quote from the quote list.\n'
if userlevel >= 0:
messagestr += f'`[0] {prefix}help`: PMs the user info about the commands this bot supports.\n'
if 'quote' not in disabledcommands:
messagestr += f'`[0] {prefix}quote`: Prints a quote from the list.\n'
elif userlevel >= 2:
messagestr += f'~~`[0] {prefix}quote`: Prints a quote from the list.~~\n'
if '8ball' not in disabledcommands:
messagestr += f'`[0] {prefix}8ball`: Prints a random Magic 8-Ball response.\n'
elif userlevel >= 2:
messagestr += f'~~`[0] {prefix}8ball`: Prints a random Magic 8-Ball response.~~\n'
if 'test' not in disabledcommands:
                messagestr += f'`[0] {prefix}test`: Prints the arguments specified.\n'
            elif userlevel >= 2:
                messagestr += f'~~`[0] {prefix}test`: Prints the arguments specified.~~\n'
if 'tf' not in disabledcommands:
messagestr += f'`[0] {prefix}tf`: Flips some tables. (╯°□°)╯︵ ┻━┻\n'
elif userlevel >= 2:
messagestr += f'~~`[0] {prefix}tf`: Flips some tables. (╯°□°)╯︵ ┻━┻~~\n'
if 'userlevel' not in disabledcommands:
messagestr += f'`[0] {prefix}userlevel`: Shows your userlevel.\n'
elif userlevel >= 2:
messagestr += f'~~`[0] {prefix}userlevel`: Shows your userlevel.~~\n'
if 'stats' not in disabledcommands:
                messagestr += f'`[0] {prefix}stats`: Shows some stats about the bot.\n'
            elif userlevel >= 2:
                messagestr += f'~~`[0] {prefix}stats`: Shows some stats about the bot.~~\n'
            if 'src' not in disabledcommands:
                messagestr += f'`[0] {prefix}src`: Gets the speedrun.com WR for a given game and category.\n'
            elif userlevel >= 2:
                messagestr += f'~~`[0] {prefix}src`: Gets the speedrun.com WR for a given game and category.~~\n'
# custom commands
for command in customcommands:
if command['name'] not in disabledcommands:
if userlevel >= int(command['userlevel']):
if command['type'] == 'simple':
messagestr += f'`[{command["userlevel"]}] ' + \
f'{prefix}{command["name"]}`: Simple custom command.\n'
elif command['type'] == 'quote' or command['type'] == 'quotesys':
messagestr += f'`[{command["userlevel"]}] ' + \
f'{prefix}{command["name"]}`: Custom quote system.\n'
elif command['type'] == 'addquote':
messagestr += f'`[{command["userlevel"]}] ' + \
f'{prefix}{command["name"]}`: Add quote to custom quote system ' + \
f'{command["content"]}.\n'
elif command['type'] == 'delquote':
messagestr += f'`[{command["userlevel"]}] ' + \
f'{prefix}{command["name"]}`: Remove quote from custom quote system ' + \
f'{command["content"]}.\n'
elif userlevel >= 2:
if userlevel >= int(command['userlevel']):
if command['type'] == 'simple':
messagestr += f'~~`[{command["userlevel"]}] ' + \
f'{prefix}{command["name"]}`: Simple custom command.~~\n'
elif command['type'] == 'quote' or command['type'] == 'quotesys':
messagestr += f'~~`[{command["userlevel"]}] ' + \
f'{prefix}{command["name"]}`: Custom quote system.~~\n'
elif command['type'] == 'addquote':
messagestr += f'~~`[{command["userlevel"]}] ' + \
f'{prefix}{command["name"]}`: Add quote to custom quote system ' + \
f'{command["content"]}.~~\n'
elif command['type'] == 'delquote':
messagestr += f'~~`[{command["userlevel"]}] ' + \
f'{prefix}{command["name"]}`: Remove quote from custom quote system ' + \
f'{command["content"]}.~~\n'
return messagestr
|
Tiyenti/unobtainibot
|
commandhelp.py
|
commandhelp.py
|
py
| 14,255 |
python
|
en
|
code
| 1 |
github-code
|
50
|
7469977931
|
from korea_public_data.core.choices import ResponseType
from korea_public_data.core.vars import default as var
from korea_public_data.core.consts import data as const
from korea_public_data.data.base import PublicDataBase
class GetCovid19InfStateJson(PublicDataBase):
"""공공데이터활용지원센터_보건복지부 코로나19 감염 현황"""
def __init__(self, service_key: str):
        # apply parent class variables
super().__init__()
        # required settings
self.service_key = service_key
        # variables (defaults applied)
self.yesterday = var.DEFAULT_YESTERDAY_SEOUL_TIMEZONE
self.start_at = self.yesterday.strftime("%Y%m%d")
self.end_at = self.yesterday.strftime("%Y%m%d")
self.page_no = var.DEFAULT_PAGE_NO
self.num_of_rows = var.DEFAULT_PAGE_NUM_OF_ROWS
        # constants
self.response_type = ResponseType.XML
@property
def url(self):
return (
f'{const.COVID_INFECTION_STATUS_URL}'
f'startCreateDt={self.start_at}&'
f'endCreateDt={self.end_at}&'
f'pageNo={self.page_no}&'
f'numOfRows={self.num_of_rows}&'
f'serviceKey={self.service_key}'
)
def _data_valid(self):
"""데이터 이상여부 확인"""
assert self.service_key, "서비스 키가 등록되지 않았습니다."
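# Usage sketch (illustrative only; the service key below is a placeholder, not a real key):
# api = GetCovid19InfStateJson(service_key="your-service-key")
# api._data_valid()   # raises AssertionError when no key is set
# print(api.url)      # request URL covering yesterday's infection-status data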
|
lee-lou2/korea-public-data
|
data/data_go_kr/covid_infection_status.py
|
covid_infection_status.py
|
py
| 1,384 |
python
|
en
|
code
| 18 |
github-code
|
50
|
70425535517
|
""" personalize neural architectures using data from test subjects
This script retrains a pretrained neural network using additional data from test subjects. The pretrained network resulted
from a PPG-based training by the script 'ppg_training_mimic_iii.py'. The additional data can be the first 20 % of the test
subject's data or a randomly drawn 20 %. Validation is performed using the remaining 80 % of the data. The
script performs this personalization for a defined number of subjects separately and stores the results for further
analysis.
File: ppg_personalization_mimic_iii.py
Author: Dr.-Ing. Fabian Schrumpf
E-Mail: [email protected]
Date created: 8/10/2021
Date last modified: 8/10/2021
"""
from os.path import join, expanduser, isfile
from functools import partial
import argparse
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
for device in gpu_devices:
tf.config.experimental.set_memory_growth(device, True)
from tensorflow.keras.layers import ReLU
from kapre import STFT, Magnitude, MagnitudeToDecibel
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
def read_tfrecord(example, win_len=1875):
tfrecord_format = (
{
'ppg': tf.io.FixedLenFeature([win_len], tf.float32),
'label': tf.io.FixedLenFeature([2], tf.float32),
'subject_idx': tf.io.FixedLenFeature([1], tf.float32)
}
)
parsed_features = tf.io.parse_single_example(example, tfrecord_format)
return parsed_features['ppg'], (parsed_features['label'][0], parsed_features['label'][1]), parsed_features['subject_idx']
def create_dataset(tfrecords_dir, tfrecord_basename, win_len=1875, batch_size=32, modus='train'):
pattern = join(tfrecords_dir, modus, tfrecord_basename + "_" + modus + "_?????_of_?????.tfrecord")
dataset = tf.data.TFRecordDataset.list_files(pattern)
if modus == 'train':
dataset = dataset.shuffle(100, reshuffle_each_iteration=True)
dataset = dataset.interleave(
tf.data.TFRecordDataset,
cycle_length=800,
block_length=100)
else:
dataset = dataset.interleave(
tf.data.TFRecordDataset)
dataset = dataset.map(partial(read_tfrecord, win_len=win_len), num_parallel_calls=4)
dataset = dataset.shuffle(2048, reshuffle_each_iteration=True)
dataset = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = dataset.repeat()
return dataset
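# Usage sketch (hypothetical paths): pull a single batch from the test split.
# test_ds = create_dataset('/data/tfrecords', 'MIMIC_III_ppg', win_len=875,
#                          batch_size=32, modus='test')
# ppg_batch, bp_batch, subject_batch = next(iter(test_ds))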
def ppg_personalization_mimic_iii(DataDir,
ResultsDir,
ModelFile,
CheckpointDir,
tfrecord_basename,
experiment_name,
win_len=875,
batch_size=32,
lr = None,
N_epochs = 40,
Nsamp=2.5e5,
Ntrials = 30,
RandomPick = True):
pd_col_names = ['subject', 'SBP_true', 'DBP_true', 'SBP_est_prepers', 'DBP_est_prepers', 'SBP_est_postpers', 'DBP_est_postpers']
results = pd.DataFrame([], columns=pd_col_names)
experiment_name = experiment_name + '_pers'
# Load the test set from the .tfrecord files and save it as a .npz file for easier access
if isfile(join(DataDir, experiment_name + "_dataset.npz")):
npz_file = np.load(join(DataDir, experiment_name + "_dataset.npz"))
ppg = npz_file['arr_0']
BP = npz_file['arr_1']
subject_idx = npz_file['arr_2']
else:
# Load test dataset for personalization
dataset = create_dataset(DataDir, tfrecord_basename, win_len=win_len, batch_size=batch_size, modus='test')
dataset = iter(dataset)
ppg = np.empty(shape=(int(Nsamp), int(win_len)))
BP = np.empty(shape=(int(Nsamp), 2))
subject_idx = np.empty(shape=(int(Nsamp)))
for i in range(int(Nsamp) // int(batch_size)):
ppg_batch, BP_batch, subject_idx_batch = dataset.get_next()
ppg[i * batch_size:(i + 1) * batch_size, :] = ppg_batch.numpy()
BP[i * batch_size:(i + 1) * batch_size, :] = np.transpose(np.asarray(BP_batch))
subject_idx[i * batch_size:(i + 1) * batch_size] = np.squeeze(subject_idx_batch.numpy())
np.savez(join(DataDir, experiment_name + "_dataset.npz"), ppg, BP, subject_idx,['ppg', 'BP', 'subject_idx'])
# draw test subjects randomly and save their ID for reproducibility
subjects = np.unique(subject_idx)
if isfile(join(ResultsDir,'ppg_personalization_subject_list.txt')):
file = open(join(ResultsDir,'ppg_personalization_subject_list.txt'),'r')
trial_subjects = file.read()
trial_subjects = [int(float(i)) for i in trial_subjects.split('\n')[:-1]]
else:
trial_subjects = np.random.choice(subjects, size=Ntrials, replace=False)
with open(join(ResultsDir,'ppg_personalization_subject_list.txt'),'w') as f:
for item in trial_subjects:
f.write(("%s\n" % item))
# perform personalization for each test subject
for subject in trial_subjects:
print(f'Processing subject {subject} of {len(trial_subjects)}')
ppg_trial = ppg[subject_idx==subject,:]
BP_trial = BP[subject_idx==subject,:]
Nsamp_trial = BP_trial.shape[0]
N_train = int(np.round(0.2*Nsamp_trial))
idx_test = np.arange(N_train+1,Nsamp_trial,2)
ppg_test = ppg_trial[idx_test,:]
BP_test = BP_trial[idx_test,:]
ppg_trial = np.delete(ppg_trial, idx_test, axis=0)
BP_trial = np.delete(BP_trial, idx_test, axis=0)
        # draw training data from the test subject's data
        if RandomPick:
idx_train, idx_val = train_test_split(range(ppg_trial.shape[0]), test_size=int(N_train), shuffle=True)
ppg_train = ppg_trial[idx_train,:]
BP_train = BP_trial[idx_train,:]
ppg_val = ppg_trial[idx_val,:]
BP_val = BP_trial[idx_val,:]
else:
ppg_train = ppg_trial[:N_train, :]
BP_train = BP_trial[:N_train, :]
ppg_val = ppg_trial[:N_train, :]
BP_val = BP_trial[:N_train, :]
# load model dependencies
dependencies = {
'ReLU': ReLU,
'STFT': STFT,
'Magnitude': Magnitude,
'MagnitudeToDecibel': MagnitudeToDecibel
}
model = tf.keras.models.load_model(ModelFile, custom_objects=dependencies)
# retrain only the last 7 layers
for layer in model.layers[:-7]:
layer.trainable = False
if lr is None:
opt = tf.keras.optimizers.Adam()
else:
opt = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(
optimizer=opt,
loss=tf.keras.losses.mean_squared_error,
metrics=[['mae'], ['mae']]
)
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
filepath=CheckpointDir + experiment_name + '.h5',
save_best_only=True,
save_weights_only=True
)
EarlyStopping_cb = tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=5,
restore_best_weights=True
)
# prediction on the test data prior to personalization
SBP_val_prepers, DBP_val_prepers = model.predict(ppg_test)
SBP_train = BP_train[:, 0]
DBP_train = BP_train[:, 1]
SBP_val = BP_val[:, 0]
DBP_val = BP_val[:, 1]
# perform personalization using 20% of the test subject's data
history = model.fit(x=ppg_train, y=(SBP_train, DBP_train),
epochs=N_epochs,
batch_size=batch_size,
shuffle=True,
validation_data=(ppg_val, (SBP_val, DBP_val)),
callbacks=[checkpoint_cb, EarlyStopping_cb])
# prediction on the test data after personalization
model.load_weights(checkpoint_cb.filepath)
SBP_val_postpers, DBP_val_postpers = model.predict(ppg_test)
# save predictions for later analysis
results = results.append(pd.DataFrame(np.concatenate((
subject*np.ones(shape=(BP_test.shape[0],1)),
np.expand_dims(BP_test[:,0], axis=1),
np.expand_dims(BP_test[:,1], axis=1),
SBP_val_prepers,
DBP_val_prepers,
SBP_val_postpers,
DBP_val_postpers
),axis=1), columns=pd_col_names))
if RandomPick == True:
results.to_csv(join(ResultsDir, experiment_name + '_random.csv'))
else:
results.to_csv(join(ResultsDir, experiment_name + '_first.csv'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('ExpName', type=str, help="Name of the training preceded by the respective date in the format MM-DD-YYYY")
parser.add_argument('DataDir', type=str, help="folder containing the train, val and test subfolders containing tfrecord files")
parser.add_argument('ResultsDir', type=str, help="Directory in which results are stored")
parser.add_argument('ModelPath', type=str, help="Path where the model file used for personalization is located")
parser.add_argument('chkptdir', type=str, help="directory used for storing model checkpoints")
parser.add_argument('--lr', type=float, default=0.003, help="initial learning rate (default: 0.003)")
parser.add_argument('--batch_size', type=int, default=32, help="batch size used for training (default: 32)")
parser.add_argument('--winlen', type=int, default=875, help="length of the ppg windows in samples (default: 875)")
    parser.add_argument('--epochs', type=int, default=1000, help="maximum number of epochs for training (default: 1000)")
parser.add_argument('--nsubj', type=int, default=20, help="Number subjects used for personalization (default :20)")
    parser.add_argument('--randompick', type=int, default=0, help="define whether data for personalization is drawn randomly (1) or comprises the first 20 %% of the test subject's data (0) (default: 0)")
args = parser.parse_args()
tfrecord_basename = 'MIMIC_III_ppg'
ExpName = args.ExpName
DataDir = args.DataDir
ResultsDir = args.ResultsDir
ModelPath = args.ModelPath
CheckpointDir = args.chkptdir
win_len = args.winlen
lr = args.lr
N_epochs = args.epochs
N_trials = args.nsubj
RandomPick = True if args.randompick == 1 else False
ModelFile = join(ModelPath, ExpName + '_cb.h5')
ppg_personalization_mimic_iii(DataDir,
ResultsDir,
ModelFile,
CheckpointDir,
tfrecord_basename,
                                  ExpName,
                                  win_len=win_len,
lr=lr,
Ntrials=N_trials,
N_epochs=N_epochs,
                                  RandomPick=RandomPick)
#architecture = 'slapnicar'
#date = "12-07-2021"
#HomePath = expanduser("~")
#experiment_name = "mimic_iii_ppg_nonmixed_pretrain"
#ModelFile = join(HomePath, 'data', 'Sensors-Paper', 'ppg_pretrain',
# date + "_" + architecture + "_" + experiment_name + '_cb.h5')
#DataDir = join(HomePath,'data','MIMIC-III_BP', 'tfrecords_nonmixed')
#ResultsDir = join(HomePath,'Arbeit','7_Paper', '2021_Sensors_BP_ML', 'results', 'ppg_personalization')
#CheckpointDir = join(HomePath,'data','MIMIC-III_BP', 'checkpoints')
#tfrecord_basename = 'MIMIC_III_ppg'
#learning_rate = None
#ppg_personalization_mimic_iii(DataDir,
# ResultsDir,
# ModelFile,
# CheckpointDir,
# tfrecord_basename,
# date+'_' + architecture+ '_' +experiment_name,
# win_len=875,
# lr=learning_rate,
# Ntrials=20,
# N_epochs=100,
# RandomPick=False)
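# Example invocation (hypothetical paths and experiment name; flags as defined by the argparse setup above):
# python ppg_personalization_mimic_iii.py 12-07-2021_slapnicar_mimic_iii_ppg_nonmixed_pretrain \
#     ~/data/MIMIC-III_BP/tfrecords_nonmixed ~/results ~/models ~/checkpoints \
#     --lr 0.0001 --batch_size 32 --winlen 875 --epochs 100 --nsubj 20 --randompick 0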
|
Fabian-Sc85/non-invasive-bp-estimation-using-deep-learning
|
ppg_personalization_mimic_iii.py
|
ppg_personalization_mimic_iii.py
|
py
| 12,573 |
python
|
en
|
code
| 96 |
github-code
|
50
|
9445686087
|
from django.shortcuts import render, redirect
from bs4 import BeautifulSoup
import requests
# Create your views here.
def home(request):
if request.method == "POST":
url = request.POST.get("href")
# Check url is under ptt domain
if url[0:22] != "https://www.ptt.cc/bbs":
url = "https://www.ptt.cc/bbs/" + url + "/index.html"
if "index" in url:
tag = 0
else:
tag = 1
html = requests.get(url, cookies={"over18":"1"})
html.decoding = "utf-8"
if html.status_code != 200:
message = "輸入錯誤或查無此看板、文章,請重新輸入。"
return render(request, 'appPttParser/home.html', {"message":message})
soup = BeautifulSoup(html.text, 'html.parser')
soup_list = soup.find_all("a")
image_list = []
if tag == 0:
for atag in soup_list:
if "M." in str(atag.get("href")):
html = requests.get("https://www.ptt.cc" + str(atag.get("href")), cookies={"over18":"1"})
html.decoding = "utf-8"
soup = BeautifulSoup(html.text, "html.parser")
# article_url = "https://www.ptt.cc" + str(atag.get("href"))
temp = ("https://www.ptt.cc" + str(atag.get("href")), soup.title.text)
image_list.append(temp)
img_tag = soup.find_all("a")
for img in img_tag:
if ".jpg" in str(img.get("href")) or ".png" in str(img.get("href")):
image_list.append(str(img.get("href")))
else:
temp = (url, soup.title.text)
image_list.append(temp)
for atag in soup_list:
image_href = str(atag.get("href"))
if ".jpg" in image_href or ".png" in image_href:
image_list.append(image_href)
return render(request, 'appPttParser/show_img.html', locals())
return render(request, 'appPttParser/home.html', locals())
def show_img(request):
return render(request, 'appPttParser/show_img.html')
|
MatsuiLin101/ml101-site
|
appPttParser/views.py
|
views.py
|
py
| 2,170 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8148804844
|
import os
import json
from typing import Optional
import dotenv
import openai
import streamlit as st
from streamlit_chat import message
# .env file must have OPENAI_API_KEY and OPENAI_API_BASE
dotenv.load_dotenv()
openai.api_type = "azure"
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
system_msg = """
You're a creative and detail-oriented Product Naming Specialist.
You're responsible for developing and executing naming strategies for our products and services.
You have a deep understanding of brand identity and positioning, as well as experience in developing compelling and memorable product names.
"""
examples = """
"""
ENGINE = "chatgpt"
TEMPERATURE = 0.9
MAX_TOKENS = 200
TOP_P = 1
FREQUENCY_PENALTY = 1.0
PRESENCE_PENALTY = 1.0
st.set_page_config(page_title="Creative Product Naming Assistant", page_icon=":robot_face:", layout="wide", initial_sidebar_state="collapsed")
if "IsBegin" not in st.session_state:
st.session_state["IsBegin"] = False
if "history_conversations" not in st.session_state:
st.session_state["history_conversations"] = []
st.write("# Creative Product Naming Assistant")
# helper that wraps openai.ChatCompletion.create and records the running conversation
def run(history: Optional[list], user_msg: str):
if history is None:
messages = [{"role":"system", "content":system_msg}, {"role":"user","content":user_msg}]
st.session_state.history_conversations.append({"role":"system", "content":system_msg})
st.session_state.history_conversations.append({"role":"user","content":user_msg})
else:
messages = history + [{"role":"user","content":user_msg}]
st.session_state.history_conversations.append({"role":"user","content":user_msg})
res = openai.ChatCompletion.create(
engine=ENGINE,
messages = messages,
temperature=TEMPERATURE,
max_tokens=MAX_TOKENS,
top_p=TOP_P,
frequency_penalty=FREQUENCY_PENALTY,
presence_penalty=PRESENCE_PENALTY,
n=1
)
st.session_state.history_conversations.append({"role":"assistant", "content":res.choices[0].message['content']})
# sidebar for system messages and examples
with st.sidebar:
examples_tab, system_tab = st.tabs(["examples", "system"])
# samples
with examples_tab:
st.header("Examples")
examples = st.text_input(label="Add examples")
with system_tab:
st.header("System messages")
system_msg = st.text_area(label="Edit system message", value=system_msg)
# display history conversations
with st.container():
# create a input using streamlit
user_msg = st.text_input(label="Type your message here", value="")
# create a button using streamlit
if st.button("Send"):
if st.session_state["IsBegin"] == False:
st.session_state["IsBegin"] = True
run(history = None, user_msg = user_msg)
else:
run(history = st.session_state.history_conversations, user_msg = user_msg)
if st.session_state["IsBegin"] == True:
# display history conversations in reverse order
for i in range(len(st.session_state.history_conversations)-1, -1, -1):
if st.session_state.history_conversations[i]["role"] == "user":
message(st.session_state.history_conversations[i]["content"], is_user=True, key=str(i))
elif st.session_state.history_conversations[i]["role"] == "assistant":
message(st.session_state.history_conversations[i]["content"], key=str(i))
elif st.session_state.history_conversations[i]["role"] == "system":
message(st.session_state.history_conversations[i]["content"], key=str(i))
with st.container():
st.info(st.session_state.history_conversations)
if len(st.session_state.history_conversations) > 0:
# download chat history as json file
# download as json file
st.download_button(label="Save chat history", data=json.dumps(st.session_state.history_conversations), file_name="chat_history.json", mime="application/json")
|
hyssh/azure-openai-quickstart
|
quickstart-learnfast/creative-product-naming-assistant/app.py
|
app.py
|
py
| 4,098 |
python
|
en
|
code
| 2 |
github-code
|
50
|
39512273388
|
import math
with open('14/input2.txt', 'rt') as fp:
    lines = fp.readlines()
class Reaction:
def __init__(self, formula):
self.components = {}
components, output = formula.split('=>')
self.parseComponents(components)
self.quantity, self.reagent = output.strip().split(' ')
self.quantity = int(self.quantity)
def parseComponents(self, componentLine):
for component in componentLine.strip().split(','):
qty, reagent = component.strip().split(' ')
self.components[reagent] = int(qty)
def calculateOre(reaction):
totalOre = 0
for c in reaction.components.keys():
if c != 'ORE':
qty, ore = calculateOre(reactions[c])
qty += excess.get(c, 0)
totalOre += math.ceil(reaction.components[c]/qty) * ore
if qty > reaction.components[c]:
excess[c] = qty - reaction.components[c]
else:
return reaction.quantity, reaction.components[c]
return reaction.quantity, totalOre
reactions = {}
excess = {}
for l in lines:
r = Reaction(l)
reactions[r.reagent] = r
print(calculateOre(reactions['FUEL']))
|
dshookowsky/adventOfCode
|
2019/14/14a.py
|
14a.py
|
py
| 1,188 |
python
|
en
|
code
| 0 |
github-code
|
50
|
29621436523
|
# -*- coding:utf-8 -*-
import json
import os.path as osp
class DatasetLoader:
def __init__(self, qas_path, owl_path):
self.owl_path = owl_path
self.qas_path = qas_path
self.owls = dict() # {scene_name: owl_contents}
self.qas = self.load_qa_scenario(qas_path)
# qas: {'FloorPlan1_S0.json": {
# "10": {
# 'existence': ["Exsistence/Bread/10", false],
# 'counting':[], 'attribute':[], 'relation':[],'agenthave':[], 'include':[]
# }}}
self.scene_names = ['FloorPlan'+str(sn) for sn in range(1, 31)]
def __len__(self):
pass
def load_qa_scenario(self, path):
with open(path, 'r') as f:
qas = json.load(f)
return qas
def generator(self):
for fname, qa_steps in self.qas.items():
            room_name = fname.split('_')[0] # e.g. "FloorPlan30_S0.json"
seed_name = fname.split('_')[1].split('.')[0]
room_dir = osp.join(self.owl_path, room_name, seed_name)
            owl_path = osp.join(room_dir, fname.split('.')[0]+'_T') # e.g. "FloorPlan30_S0"
for step, qa_set in qa_steps.items():
try:
                    with open(owl_path+step+'.owl') as f:  # the offset exists because pred always has one fewer step than gt; the algorithm seems to lag one step behind
owl = f.read()
except:
                    print(f'no file !! [{owl_path+step+".owl"}]') # (str(int(step)-1))
continue
yield (qa_set, owl, room_dir)
if __name__ == '__main__':
'''
p = os.listdir('./results/owl')
with open(os.path.join('./results/owl', p[0])) as f:
a = f.read()
qa = h5py.File('./existence.h5')
print(list(qa['questions']['question']))
'''
dataset = DatasetLoader(qas_path='/home/ailab/DH/ai2thor/datasets/qa_scenario.json',
owl_path='/home/ailab/DH/ai2thor/datasets/gsg_pred/owl')
for i, (qa_set, owl, room_name) in enumerate(dataset.generator()):
print(qa_set)
print(room_name)
print(owl)
break
|
donghyeops/3D-SGG
|
VeQA/dataset_loader.py
|
dataset_loader.py
|
py
| 2,304 |
python
|
en
|
code
| 2 |
github-code
|
50
|
72088335514
|
from collections import defaultdict
from typing import Union
class Graph:
""" Undirected graph data structure """
def __init__(self, connections):
self.graph = defaultdict(set)
self.add_connections(connections)
def add_connections(self, connections):
""" Add connections (list of tuple pairs) to graph """
for node1, node2 in connections:
self.add(node1, node2)
def add(self, node1, node2):
""" Add connection between node1 and node2 """
self.graph[node1].add(node2)
self.graph[node2].add(node1)
def is_connected(self, node1, node2):
""" Is node1 directly connected to node2 """
return node1 in self.graph and node2 in self.graph[node1]
def __str__(self):
return '{}({})'.format(self.__class__.__name__, dict(self.graph))
def transform_graph(source_graph_repr: Union[list, dict, 'Graph'],
representation: str) -> Union[dict, list, 'Graph']:
if isinstance(source_graph_repr, list):
vertices = []
for n, i in enumerate(source_graph_repr):
for m, j in enumerate(i):
if j:
edge = sorted((n, m))
if edge not in vertices:
vertices.append(edge)
class_graph = Graph(vertices)
elif isinstance(source_graph_repr, dict):
vertices = []
for i in source_graph_repr:
for j in source_graph_repr[i]:
edge = sorted((i, j))
if edge not in vertices:
vertices.append(edge)
class_graph = Graph(vertices)
elif isinstance(source_graph_repr, Graph):
class_graph = source_graph_repr
if representation == 'matrix':
matrix_graph = []
for v in class_graph.graph.values():
adjacent = [0] * len(class_graph.graph)
for i in v:
adjacent[i] = 1
matrix_graph.append(adjacent)
return matrix_graph
elif representation == 'adjacency':
return dict(class_graph.graph)
elif representation == 'class':
return class_graph
# Test example: the same graph in different representations
matrix_graph = [[0, 1, 1, 0, 0, 0],
[1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 0]]
adjacency_list_graph = {0: [1, 2],
1: [0, 3, 4],
2: [0, 5],
3: [1],
4: [1, 5],
5: [2, 4]}
vertices = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (4, 5)]
class_graph = Graph(vertices)
print(transform_graph(matrix_graph, 'class'))
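# transform_graph can convert between any pair of the three representations, e.g.:
# print(transform_graph(class_graph, 'adjacency'))        # Graph -> adjacency dict
# print(transform_graph(adjacency_list_graph, 'matrix'))  # dict -> adjacency matrix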
|
BunnyNoBugs/Classroom-Year-3
|
midterm_test/graph_representation.py
|
graph_representation.py
|
py
| 2,843 |
python
|
en
|
code
| 2 |
github-code
|
50
|
16549532158
|
import requests
import warnings
def results_to_names(results, include_synonyms=True):
"""Takes OLS term query returns list of all labels and synonyms"""
out = []
for t in results['_embedded']['terms']:
out.append(t['label'])
if include_synonyms and t['synonyms']:
out.extend(t['synonyms']) # Consider being more conservative
# and only selecting synonyms
# of a certain type
return out
def curie_2_obo_ontology(curie):
cp = curie.split(':')
if not len(cp) == 2:
raise Exception("{} is not a curie".format(curie))
db = cp[0]
# acc = cp[1]
ontology = db.lower()
return ontology
class OLSQueryWrapper:
# Should probably (re)-consider using pip install ols-client.
def __init__(self):
self.api_base = "https://www.ebi.ac.uk/ols/api/ontologies"
self.upper_ont_filters = {}
def set_upper_ont_filter(self, ont, upper_bound_term):
if not (ont in self.upper_ont_filters.keys()):
self.upper_ont_filters[ont] = set()
self.upper_ont_filters[ont].update(set(
results_to_names(
self.query(upper_bound_term, 'ancestors'))))
def set_upper_ont_filter_cl_cell(self):
self.set_upper_ont_filter('cl', 'CL:0000548')
def set_upper_ont_filter_fbbt_cell(self):
self.set_upper_ont_filter('fbbt', 'FBbt:00007002')
def _gen_query_url(self, curie, query, id_field='id'):
"""Use curie to generate OBO-style ontology identifier
Check whether ontology exists
return query URL
query may be: terms, descendants, parents, children, ancestors.
terms query requires id_field='obo_id'"""
cp = curie.split(':')
if not len(cp) == 2:
raise Exception("{} is not a curie".format(curie))
db = cp[0]
# acc = cp[1]
ontology = db.lower()
if ontology == 'bfo' and query == 'properties':
ontology = 'ro'
if self.check_ontology(ontology):
return '/'.join([self.api_base, ontology,
query + '?' + id_field + '=' + curie]) # Yuk - can id be passed as data?
else:
return False
def check_ontology(self, o):
"""Check whether ontology 'o' is known to OLS. Returns boolean."""
r = requests.get('/'.join([self.api_base, o]))
# Exception handling is a bit crude.
if r.status_code == 200:
j = r.json()
if "ontologyId" in j.keys() and j['ontologyId'] == o:
return True
warnings.warn("The ontology %s is not known to OLS" % o)
return False
def query(self, curie, query):
"""curie must be a valid OBO curie e.g. CL:0000017
query may be: terms, descendants, parents, children, ancestors
returns JSON or False."""
# TODO: Extend to work for non OBO Library ontologies (pass a curie map)
# TODO: Add paging
# TODO: For terms query - add backup query for properties.
### For terms query, treating curie as OBO ID:
id_field = 'id'
if query == 'terms':
id_field = 'obo_id'
url = self._gen_query_url(curie, query, id_field=id_field)
# print(url)
if not url:
return False
response = requests.get(url)
if response.status_code == 404:
if query == "terms":
query = "properties"
url = self._gen_query_url(curie, query, id_field=id_field)
if not url:
return False
# print(url)
response = requests.get(url)
if response.status_code == 404:
warnings.warn("Content not found: %s" % curie)
else:
warnings.warn("Content not found: %s" % curie)
elif response.status_code == 200:
results = response.json()
if not ('_embedded' in results.keys()) or not ('terms' in results['_embedded'].keys()):
warnings.warn("No term returned.")
# TODO: Improve warnings error handling.
# This is very unsatisfactory - but this has to cover both empty results lists
# and unrecognised query word
return results
else:
raise ConnectionError(" %s (%s) on query for %s. "
"" % (response.status_code,
response.reason,
curie))
def get_ancestor_labels(self, curie):
al = self.query(curie, 'ancestors')
if al:
obo = curie_2_obo_ontology(curie)
if obo == 'cl':
if not 'cl' in self.upper_ont_filters.keys():
self.set_upper_ont_filter_cl_cell()
if obo == 'fbbt':
if not 'fbbt' in self.upper_ont_filters.keys():
                self.set_upper_ont_filter_fbbt_cell()
if obo in self.upper_ont_filters.keys():
return set(results_to_names(al)) - set(self.upper_ont_filters[obo])
else:
return results_to_names(al)
else:
return []
def get_term(self, curie):
# url = self.gen_query_url(curie, 'terms', id_field='obo_id')
# r = requests.get(url)
return self.query(curie, query='terms')
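# Usage sketch ('CL:0000540' is the OBO curie for "neuron", used here only as an example):
# ols = OLSQueryWrapper()
# term_json = ols.get_term('CL:0000540')
# ancestor_labels = ols.get_ancestor_labels('CL:0000540')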
|
HumanCellAtlas/matrix_semantic_map
|
src/matrix_semantic_map/OLS_tools.py
|
OLS_tools.py
|
py
| 5,445 |
python
|
en
|
code
| 1 |
github-code
|
50
|
71057046876
|
#!/usr/bin/env python
import sys
import re
# Simple Python script that takes PlatformIO's compiler errors and maps them to
# output that can be understood by the Actions runner.
re_err = re.compile(r"^([^:]+):([0-9]+):([0-9]+): error: (.*)$")
# Parameters are strings of the form
# path_prefix:replacement_prefix:line_offset
# Where all paths starting with path_prefix will be replaced with replacement_prefix,
# and if such a replacement takes place, the line number will be shifted by line_offset.
# That allows taking care for inserted code like the #include <Arduino.h>
mappings = []
for arg in sys.argv[1:]:
parts = arg.split(":", 2)
mappings.append((*parts[0:2], 0 if len(parts)==2 else int(parts[2])))
for line in sys.stdin:
print(line, end="")
m = re_err.match(line.strip())
if m is not None:
name = m.group(1)
lineno = int(m.group(2))
for mapping in mappings:
if name.startswith(mapping[0]):
name = mapping[1] + name[len(mapping[0]):]
lineno += mapping[2]
print("::error file={name},line={line},col={col}::{message}".format(
name=name, line=lineno, col=m.group(3), message=m.group(4)
))
|
fhessel/esp32_https_server
|
extras/ci/scripts/pio-to-gh-log.py
|
pio-to-gh-log.py
|
py
| 1,157 |
python
|
en
|
code
| 292 |
github-code
|
50
|
40241578160
|
import FWCore.ParameterSet.Config as cms
externalLHEProducer = cms.EDProducer("EmbeddingLHEProducer",
src = cms.InputTag("selectedMuonsForEmbedding","",""),
vertices = cms.InputTag("offlineSlimmedPrimaryVertices","","SELECT"),
particleToEmbed = cms.int32(15),
rotate180 = cms.bool(False),
mirror = cms.bool(False),
studyFSRmode = cms.untracked.string("reco")
)
makeexternalLHEProducer = cms.Sequence( externalLHEProducer)
|
cms-sw/cmssw
|
TauAnalysis/MCEmbeddingTools/python/EmbeddingLHEProducer_cfi.py
|
EmbeddingLHEProducer_cfi.py
|
py
| 448 |
python
|
en
|
code
| 985 |
github-code
|
50
|
16164098320
|
from os import sep
with open(f'inputs{sep}day_3.txt') as rf:
lines = [line.strip() for line in rf.readlines()]
class Claim:
def __init__(self, owner=None, origin=None, span=None):
self.owner = owner
self.origin = origin
self.x = int(origin[0])
self.w = int(span[0])
self.y = int(origin[1])
self.h = int(span[1])
self.span = span
def get_area(self):
return int(self.span[0])*int(self.span[1])
def make_claims(claim_set):
claims = set()
for c in claim_set:
cs = c.split()
owner = int(cs[0][1:])
origin = tuple(cs[2][:-1].split(","))
span = tuple(cs[3].split("x"))
claims.add(Claim(owner, origin, span))
return claims
if __name__ == '__main__':
elf_claims = make_claims(lines)
#Part 1:
def make_board(claim_set):
tiles = {}
        for claim in claim_set:
for i in range(claim.x, claim.x + claim.w):
for j in range(claim.y, claim.y + claim.h):
if not f"{i},{j}" in tiles: tiles[f"{i},{j}"] = 1
else: tiles[f"{i},{j}"] += 1
return tiles, len([v for v in tiles.values() if v > 1])
print(make_board(elf_claims)[1])
#Answer: 100595
#Part 2:
def get_lonely_claim(claim_set):
        tiles = make_board(claim_set)[0]
all_alone = True
for claim in claim_set:
for i in range(claim.x, claim.x + claim.w):
for j in range(claim.y, claim.y + claim.h):
if tiles[f"{i},{j}"] != 1:
all_alone = False
if not all_alone: break
if not all_alone: break
if all_alone: return claim.owner
all_alone = True
print(get_lonely_claim(elf_claims))
#Answer: 415
|
Nathansbud/AdventOfCode
|
2018/day_3.py
|
day_3.py
|
py
| 1,832 |
python
|
en
|
code
| 1 |
github-code
|
50
|
29596572847
|
from gevent import monkey
monkey.patch_all()
import logging
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("blockchain").setLevel(logging.DEBUG)
logging.getLogger("channel_manager").setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
from microraiden.make_helpers import make_paywalled_proxy
from requests.exceptions import ConnectionError
from web3 import HTTPProvider, Web3
from flask import Flask
import config
import sys
import uwsgi
import gevent
from bs4 import BeautifulSoup
from flask import make_response
import io
from microraiden.examples.demo_proxy.fortunes import PaywalledFortune
class MyPaywalledFortune(PaywalledFortune):
def __init__(self, path, cost, filepath):
super(MyPaywalledFortune, self).__init__(path, cost, filepath)
html_tmpl = io.open('web/fortunes_tmpl.html', 'r', encoding='utf8').read()
self.soup_tmpl = BeautifulSoup(html_tmpl, 'html.parser')
def get(self, url):
headers = {'Content-Type': 'text/html; charset=utf-8'}
text = self.fortunes.get()
return make_response(self.generate_html(text), 200, headers)
def generate_html(self, text):
div = self.soup_tmpl.find('div', {"id" : "fortunes-text"})
div.h1.string = text
return str(self.soup_tmpl)
#
# This is an example of a simple uwsgi/Flask app using Microraiden to pay for the content.
# Set up the configuration values in config.py (at least you must set PRIVATE_KEY, RPC_PROVIDER).
#
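# A minimal config.py sketch (all values are placeholders, not working credentials):
# PRIVATE_KEY = '0x...'                # hex-encoded Ethereum private key
# RPC_PROVIDER = 'http://parity:8545'  # Ethereum node RPC endpoint
# STATE_FILE = '/data/channels.db'     # channel manager state database
# SLEEP_RELOAD = 5                     # seconds to wait before reloading uwsgi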
if config.PRIVATE_KEY is None:
log.critical("config.py: PRIVATE_KEY is not set")
sys.exit(1)
if config.RPC_PROVIDER is None:
log.critical("config.py: RPC_PROVIDER is not set")
sys.exit(1)
# create a custom web3 provider - parity/geth runs in another container/on another host
try:
web3 = Web3(HTTPProvider(config.RPC_PROVIDER, request_kwargs={'timeout': 60}))
network_id = web3.version.network
except ConnectionError:
log.critical("Ethereum node isn't responding. Restarting after %d seconds."
% (config.SLEEP_RELOAD))
gevent.sleep(config.SLEEP_RELOAD)
uwsgi.reload()
# create flask app
app = Flask(__name__)
# create microraiden app
microraiden_app = make_paywalled_proxy(config.PRIVATE_KEY, config.STATE_FILE,
web3=web3, flask_app=app)
# add some content
microraiden_app.add_content(MyPaywalledFortune("fortunes_en", 1, "microraiden/data/fortunes"))
microraiden_app.add_content(MyPaywalledFortune("fortunes_cn", 1, "microraiden/data/chinese"))
# only after blockchain is fully synced the app is ready to serve requests
microraiden_app.channel_manager.wait_sync()
|
ilhanu/ether-academy
|
Code_research/microraiden/docker/uwsgi/app/app.py
|
app.py
|
py
| 2,702 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26489706317
|
######################################################################################
### Gene set enrichment analysis with GSEAPY
######################################################################################
### Author: Carlos Arevalo
### Email: [email protected]
### PROGRAM DESCRIPTION
### Program takes as input a DTU table from DrimSeq or any other differential expression/usage analysis
### containing "symbol", "gene_padj" and "log2fcp" and perform gene-set enrichment analysis (GSEA) and
### pre-rank GSEA (pGSEA) using the GSEAPY package. Program outputs a main table for GSEA and pGSEA,
### respectively. Analysis can be perform on both individual or a list of libraries. Some Human test
### libabries include 'GO_Biological_Process_2021', 'GO_Molecular_Function_2021', 'GO_Cellular_Component_2021',
### 'KEGG_2021_Human', 'Reactome_2022', and 'MSigDB_Hallmark_2020'
### INSTALLATION
### pip install gseapy
### conda install -c bioconda gseapy
### PROGRAM USAGE
#python .../gsea/compute_gsea.py \
# -i drimseq_deltaPSI_padj_results.txt \
# -s "Human" \
# -c "global" \
# -l GO_Biological_Process_2021 GO_Molecular_Function_2021 GO_Cellular_Component_2021 KEGG_2021_Human Reactome_2022 MSigDB_Hallmark_2020 \
# -t 1.5 \
# -e 0.5 \
# -p 0.05 \
# -n 10 \
# -o /gsea_output/
######################################################################################
import gseapy as gp
from gseapy.plot import barplot, dotplot
from gseapy import enrichment_map
from gseapy.plot import gseaplot
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import numpy as np
import sys
import os
import argparse
import warnings
class CommandLine():
def __init__(self, inOpts=None):
self.parser = argparse.ArgumentParser(
description = "compute_gsea.py - computes GSEA using GSEAPY package",
epilog = "Program epilog - please provide corrections and implementations for the program",
add_help = True,
prefix_chars = "-",
usage = "%(prog)s [options] -option1[default] <input>output")
self.parser.add_argument("-i", "--input", type=str, required=True, help='Input data in the form of table/data-frame')
self.parser.add_argument("-s", "--organism", type=str, required=True, help='Organism for analysis')
self.parser.add_argument("-c", "--condition", type=str, required=True, help='Condition label')
self.parser.add_argument("-l", "--library", type=str, nargs='+', required=True, help='Library list')
self.parser.add_argument("-t", "--threshold", type=float, required=True, help='Log2 FC threshold for significant genes selection')
self.parser.add_argument("-e", "--enrich_threshold", type=float, required=True, help='Threshold for enrichment analysis')
self.parser.add_argument("-p", "--pval_threshold", type=float, required=True, help='Log2 FC threshold for significant genes selection')
self.parser.add_argument("-n", "--terms", type=int, required=True, help='Number of top terms to plot')
self.parser.add_argument("-o", "--output", type=str, required=True, help='Output directory for results')
if inOpts is None:
self.args = self.parser.parse_args()
else:
self.args = self.parser.parse_args(inOpts)
class computeGSEA():
def readData(inFile, threshold, pval):
'''
Read input data
'''
warnings.filterwarnings("ignore")
df = pd.read_csv(inFile, header=None, sep="\t")
df.columns = df.iloc[0]
df = df[1:]
df.reset_index(drop=True, inplace=True)
temp_df = df[["symbol", "gene_padj", "log2fc", "feature_id"]]
temp_df['gene_padj'] = temp_df['gene_padj'].astype(float)
temp_df['log2fc'] = temp_df['log2fc'].astype(float)
        temp_sig = temp_df.loc[temp_df.gene_padj < pval]  # e.g. pval = 0.05
down_df = temp_sig[(temp_sig.log2fc < -threshold)]
up_df = temp_sig[(temp_sig.log2fc > threshold)]
data = pd.concat([up_df, down_df])
return data
def readPrerank(data, threshold, pval):
'''
Read input data
'''
warnings.filterwarnings("ignore")
df = pd.read_csv(data, header=None, index_col=0, sep="\t")
df.columns = df.iloc[0]
df = df[1:]
df.reset_index(drop=True, inplace=True)
temp_df = df[["symbol", "gene_padj", "log2fc"]]
temp_df['gene_padj'] = temp_df['gene_padj'].astype(float)
temp_df['log2fc'] = temp_df['log2fc'].astype(float)
        temp_sig = temp_df.loc[temp_df.gene_padj < pval]  # e.g. pval = 0.05
down_df = temp_sig[(temp_sig.log2fc < -threshold)]
up_df = temp_sig[(temp_sig.log2fc > threshold)]
data = pd.concat([up_df, down_df])
data['rank'] = data['gene_padj']*data['log2fc']
temp_sorted = data.sort_values('rank', ascending=False)
uniq_df = temp_sorted.drop_duplicates(subset=['symbol'])
data = pd.DataFrame()
data[1] = list(uniq_df["rank"])
data.index = list(uniq_df["symbol"])
return data
def readPositives(data, threshold, pval):
'''
Get genes with positive LFC values
'''
warnings.filterwarnings("ignore")
df = pd.read_csv(data, header=None, index_col=0, sep="\t")
df.columns = df.iloc[0]
df = df[1:]
df.reset_index(drop=True, inplace=True)
temp_df = df[["symbol", "gene_padj", "log2fc"]]
temp_df['gene_padj'] = temp_df['gene_padj'].astype(float)
temp_df['log2fc'] = temp_df['log2fc'].astype(float)
        temp_sig = temp_df.loc[temp_df.gene_padj < pval]  # e.g. pval = 0.05
pos_df = temp_sig[(temp_sig.log2fc > threshold)]
pos_df['rank'] = pos_df['gene_padj']*pos_df['log2fc']
temp_sorted_pos = pos_df.sort_values('rank', ascending=False)
uniq_pos = temp_sorted_pos.drop_duplicates(subset=['symbol'])
pos_data = pd.DataFrame()
pos_data[1] = list(uniq_pos["rank"])
pos_data.index = list(uniq_pos["symbol"])
return pos_data
def readNegatives(data, threshold, pval):
'''
Get genes with negative LFC values
'''
warnings.filterwarnings("ignore")
df = pd.read_csv(data, header=None, index_col=0, sep="\t")
df.columns = df.iloc[0]
df = df[1:]
df.reset_index(drop=True, inplace=True)
temp_df = df[["symbol", "gene_padj", "log2fc"]]
temp_df['gene_padj'] = temp_df['gene_padj'].astype(float)
temp_df['log2fc'] = temp_df['log2fc'].astype(float)
        temp_sig = temp_df.loc[temp_df.gene_padj < pval]  # e.g. pval = 0.05
down_df = temp_sig[(temp_sig.log2fc < -threshold)]
down_df['rank'] = (-1)*down_df['gene_padj']*down_df['log2fc']
temp_sorted = down_df.sort_values('rank', ascending=False)
uniq_down = temp_sorted.drop_duplicates(subset=['symbol'])
down_data = pd.DataFrame()
down_data[1] = list(uniq_down["rank"])
down_data.index = list(uniq_down["symbol"])
return down_data
def df2List(df):
"""
Convert dataframe or series to list
"""
warnings.filterwarnings("ignore")
temp_df = pd.DataFrame()
gene_list = pd.unique(list(df["symbol"])).tolist()
gene_list = [x for x in gene_list if str(x) != 'nan']
temp_df[0] = gene_list
return(temp_df)
def enrichR(gene_list, gene_set, organism, threshold, layout, output):
"""
Peforms enrichr analysis
"""
enr = gp.enrichr(gene_list=gene_list,
gene_sets=gene_set,
organism=organism,
no_plot=True,
cutoff=threshold,
outdir='{output_dir}{label}_enrichr_analysis'.format(output_dir=output, label=layout)
)
enr_results = enr.results
enr_results = enr_results[enr_results["Adjusted P-value"] < 0.05]
return(enr_results)
def barPlot(df, title, top_term, layout, output):
"""
Plots pathways barplot
"""
plot = barplot(df, column="Adjusted P-value", size=10,
top_term=top_term, title=title)
plot.figure.savefig('{output_dir}{label}_enrichment_barplot.png'.format(
output_dir=output, label=layout),
bbox_inches="tight",
dpi=600)
def dotPlot(df, title, top_term, layout, output):
"""
Plots pathways dotplot
"""
plot2 = dotplot(df, size=10, top_term=top_term, title=title,
marker='o', show_ring=False, cmap="seismic",)
plot2.figure.savefig(
'{output_dir}{label}_enrichment_dotplot.png'.format(
output_dir=output, label=layout),
bbox_inches="tight",
dpi=600)
def enrichmentPlot(df, top_term, output):
"""
Plot enrichment analysis for a defined number of terms
"""
        # 'df' is a gseapy prerank result object, which exposes .ranking, .res2d and .results
        terms = df.res2d.Term
        for i in range(1, top_term):
            term = terms[i]
            plot = gseaplot(rank_metric=df.ranking,
                term=term,
                **df.results[term])
plot.figure.savefig(
'{output_dir}term_{label}_gsea_plot.png'.format(output_dir=output, label=term),
bbox_inches="tight",
dpi=600)
def prerankGSEA(rank_df, gset, top_term, layout, output):
"""
        Enrichr libraries are supported by the prerank module; just provide the name.
        Uses 4 processes to accelerate the permutation step.
"""
prerank = gp.prerank(rnk=rank_df,
gene_sets=gset,
threads=4,
min_size=10,
max_size=1000,
processes=4,
permutation_num=100,
ascending=False,
outdir='{output_dir}{label}_prerank_report'.format(output_dir=output, label=layout),
format='png',
seed=6,
verbose=True)
return prerank
def enrichmentMap(df, layout, output):
"""
Performs enrichment mapping
"""
nodes, edges = enrichment_map(df)
graph = nx.from_pandas_edgelist(edges,
source='src_idx',
target='targ_idx',
edge_attr=['jaccard_coef', 'overlap_coef', 'overlap_genes'])
fig, ax = plt.subplots(figsize=(7, 7))
pos = nx.layout.spiral_layout(graph)
nx.draw_networkx_nodes(graph,
pos=pos,
cmap=plt.cm.RdYlBu,
node_color=list(nodes.NES),
node_size=list(nodes.Hits_ratio*1000))
nx.draw_networkx_labels(graph,
pos=pos,
labels=nodes.Term.to_dict())
edge_weight = nx.get_edge_attributes(graph, 'jaccard_coef').values()
nx.draw_networkx_edges(graph,
pos=pos,
width=list(map(lambda x: x*10, edge_weight)),
edge_color='#CDDBD4')
plt.savefig(
'{output_dir}{label}_pca_projection.png'.format(output_dir=output, label=layout),
bbox_inches="tight",
dpi=600)
def main(incl=None):
if incl is None:
command_line = CommandLine()
if command_line.args.input:
output = command_line.args.output
if not os.path.isdir(output):
os.mkdir(output)
inFile = command_line.args.input
organism = command_line.args.organism
library = command_line.args.library
condition = command_line.args.condition
lfc_threshold = command_line.args.threshold
enrich_threshold = command_line.args.enrich_threshold
pval_threshold = command_line.args.pval_threshold
top_terms = command_line.args.terms
sets = '_'.join(library)
print("Computing global enrichment analysis...")
input_data = computeGSEA.readData(inFile, threshold=lfc_threshold, pval=pval_threshold)
gene_list = computeGSEA.df2List(input_data)
enrich = computeGSEA.enrichR(gene_list=gene_list,
gene_set=library,
organism=organism,
threshold=enrich_threshold,
layout=condition,
output=output)
computeGSEA.barPlot(df=enrich, title="test", top_term=top_terms,
layout=condition, output=output)
computeGSEA.dotPlot(df=enrich, title="test", top_term=top_terms,
layout=condition, output=output)
prerank_df = computeGSEA.readPrerank(data=inFile, threshold=lfc_threshold, pval=pval_threshold)
prerank = computeGSEA.prerankGSEA(rank_df=prerank_df, gset=library,
top_term=top_terms, layout="all", output=output)
prerank_res = prerank.res2d
print("Computing positives and negatives only enrichment analysis...")
pos_prerank_df = computeGSEA.readPositives(data=inFile, threshold=lfc_threshold, pval=pval_threshold)
neg_prerank_df = computeGSEA.readNegatives(data=inFile, threshold=lfc_threshold, pval=pval_threshold)
pos_prerank = computeGSEA.prerankGSEA(rank_df=pos_prerank_df, gset=library,
top_term=top_terms, layout="positive", output=output)
neg_prerank = computeGSEA.prerankGSEA(rank_df=neg_prerank_df, gset=library,
top_term=top_terms, layout="negative", output=output)
pos_prerank_res = pos_prerank.res2d
neg_prerank_res = neg_prerank.res2d
prerank_sig = prerank_res.loc[prerank_res["FDR q-val"]<0.05]
pos_prerank_sig = pos_prerank_res.loc[pos_prerank_res["FDR q-val"]<0.05]
neg_prerank_sig = neg_prerank_res.loc[neg_prerank_res["FDR q-val"]<0.05]
if not prerank_sig.empty:
computeGSEA.enrichmentMap(df=prerank, layout=condition, output=output)
computeGSEA.enrichmentPlot(df=prerank, top_term=10, output=output)
if prerank_sig.empty:
print("\033[1mNo significant pre-rank terms found.\n")
if not pos_prerank_sig.empty:
computeGSEA.enrichmentMap(df=pos_prerank, layout=condition, output=output)
computeGSEA.enrichmentPlot(df=pos_prerank, top_term=10, output=output)
if pos_prerank_sig.empty:
print("\033[1mNo significant positive pre-rank terms found.\n")
if not neg_prerank_sig.empty:
computeGSEA.enrichmentMap(df=neg_prerank, layout=condition, output=output)
computeGSEA.enrichmentPlot(df=neg_prerank, top_term=10, output=output)
if neg_prerank_sig.empty:
print("\033[1mNo significant negative pre-rank terms found.\n")
print("\033[1mGSEA and prerank GSEA analysis complete.\n")
if __name__ == '__main__':
main()
|
caeareva/DM-DASE
|
gsea/compute_gsea_program.py
|
compute_gsea_program.py
|
py
| 13,621 |
python
|
en
|
code
| 0 |
github-code
|
50
|
34088264915
|
import os
import discord
from dotenv import load_dotenv
import logging
from services import get_matches_by_date
from random import choice
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
@client.event
async def on_ready():
print(f'We have logged in as {client.user}')
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('!hello'):
greeting = choice(["Hi", "Hello", "Hey"])
await message.channel.send(f'{greeting} {message.author.display_name}!')
if message.content.startswith('!matches'):
try:
date = message.content.split(" ")[1]
except IndexError:
date = ""
await message.channel.send(get_matches_by_date(date))
@client.event
async def on_message_edit(before, after):
    await before.channel.send(f'Soy un botón pero {before.author.display_name} había escrito "{before.content}"')  # "I'm just a bot, but {user} had written ..."
client.run(TOKEN, log_handler=handler, log_level=logging.DEBUG)
|
IgnacioCurti/discord_bot
|
bot.py
|
bot.py
|
py
| 1,204 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72061712156
|
import sys
sys.setrecursionlimit(10**5)
input = sys.stdin.readline
n, m = map(int, input().split())
A = [[] for _ in range(n+1)]
visited = [False]*(n+1)
def dfs(v):
visited[v] = True
for i in A[v]:
        if not visited[i]:  # visit the neighbors that have not been visited yet
dfs(i)
for _ in range(m):
s, e = map(int, input().split())
A[s].append(e)
    A[e].append(s)  # add the edge in both directions (undirected graph)
cnt = 0
for i in range(1, n+1):
if not visited[i]:
cnt += 1
dfs(i)
print(cnt)
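# Worked example: n=6, m=5 with edges (1,2), (2,5), (5,1), (3,4), (4,6)
# yields two connected components {1, 2, 5} and {3, 4, 6}, so the program prints 2.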
|
cherrie-k/algorithm-python
|
백준/Silver/11724. 연결 요소의 개수/연결 요소의 개수.py
|
연결 요소의 개수.py
|
py
| 562 |
python
|
ko
|
code
| 0 |
github-code
|
50
|
13299875084
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 14:22:17 2018
@author: Soumya
"""
import cv2
import numpy as np
original_img = cv2.imread('img2.jpg')
original_img = cv2.resize(original_img, (600,923))
resized_img = cv2.resize(original_img, (300,600))
box_vector ='0 0.10083333333333333 0.6522210184182016 0.20166666666666666 0.08017334777898158'
box_info = box_vector.split(' ')
class_pred = float(box_info[0])
x_center = float(box_info[1])
y_center = float(box_info[2])
width = float(box_info[3])
height = float(box_info[4])
# scaled box parameters written out explicitly for reference;
# get_coordinates() below computes the same thing generically
x_center_big = x_center * 600
y_center_big = y_center * 923
height_big = height *923
width_big = width *600
x_center_small = x_center * 300
y_center_small = y_center * 600
height_small = height *600
width_small = width *300
def get_coordinates(x_cent,y_cent,height,width,img_shape):
# img_shape = (height, width)
img_height, img_width = img_shape
x_cent = x_cent * img_width
y_cent = y_cent * img_height
height = height * img_height
width = width * img_width
x1 = x_cent - (width/2)
x2 = x_cent + (width/2)
x3 = x2
x4 = x1
y1 = y_cent - (height/2)
y4 = y_cent + (height/2)
y2 = y1
y3 = y4
# order specified based on cv2.drawContours requirement
box = np.array([[x3,y3], [x4,y4], [x1,y1], [x2,y2]])
return box
box = get_coordinates(x_center,y_center,height,width,(923,600))
box = np.int0(box)
small_box = get_coordinates(x_center,y_center,height,width,(600,300))
small_box = np.int0(small_box)
# Drawing the boxes
original_img = cv2.drawContours(original_img, [box], 0,(255,0,0),2)
resized_img = cv2.drawContours(resized_img, [small_box], 0,(255,0,0),2)
cv2.imshow('original', original_img)
cv2.imshow('resized', resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
rounakskm/Annotation-Detector
|
box_draw.py
|
box_draw.py
|
py
| 1,842 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18525716526
|
import json
import re
import time
from bs4 import BeautifulSoup
import requests
import openpyxl
import random
import threading
import os
def get_headers():
pc_headers = {
"X-Forwarded-For": '%s.%s.%s.%s' % (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.5",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
}
return pc_headers
def get_proxies_abuyun():
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
    # proxy tunnel authentication credentials
proxyUser = ''
proxyPass = ''
proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
"host": proxyHost,
"port": proxyPort,
"user": proxyUser,
"pass": proxyPass,
}
proxies = {
"http": proxyMeta,
"https": proxyMeta,
}
return proxies
class NetWorkError(Exception):
pass
def build_request(url, headers=None, proxies=None):
    if headers is None:
        headers = get_headers()
    if proxies is None:
        proxies = get_proxies_abuyun()
    for i in range(5):
        try:
            response = requests.get(
                url, headers=headers, proxies=proxies, timeout=15)
return response
except Exception as e:
if '429' in str(e):
time.sleep(random.randint(0, 1000)/1000.0)
continue
raise NetWorkError
def write_to_excel(lines, filename, write_only=True):
excel = openpyxl.Workbook(write_only=write_only)
sheet = excel.create_sheet()
for line in lines:
sheet.append(line)
excel.save(filename)
def current_time():
now_time = time.strftime('%Y-%m-%d %H:%M:%S')
return now_time
def get_products():
need_urls = ['https://www.adidas.com.cn/plp/list.json?pf=25-40%2C25-60%2C25-60&pr=-&fo=p25%2Cp25&pn={}&pageSize=120&p=%E7%94%B7%E5%AD%90-%E4%B8%AD%E6%80%A7&isSaleTop=false',
'https://www.adidas.com.cn/plp/list.json?ni=112&pf=25-82%2C25-60%2C25-60&pr=-&fo=p25%2Cp25&pn={}&pageSize=120&p=%E5%A5%B3%E5%AD%90-%E4%B8%AD%E6%80%A7&isSaleTop=false',
'https://www.adidas.com.cn/plp/list.json?ni=139&pf=25-160%2C25-220%2C24-250%2C24-239%2C24-39&pr=-&fo=p25%2Cp25%2Cp24%2Cp24%2Cp24&pn={}&pageSize=120&p=%E7%94%B7%E7%AB%A5-%E5%A5%B3%E7%AB%A5-%E5%A4%A7%E7%AB%A5%EF%BC%888-14%E5%B2%81%EF%BC%89-%E5%B0%8F%E7%AB%A5%EF%BC%884-8%E5%B2%81%EF%BC%89-%E5%A9%B4%E7%AB%A5%EF%BC%880-4%E5%B2%81%EF%BC%89&isSaleTop=false']
result = []
for base_url in need_urls:
page = 1
failed_times = 0
while True:
try:
url = base_url.format(page) + '&_=' + \
str(int(time.time() * 1000))
req = build_request(url)
res = json.loads(req.text)
return_obj = res['returnObject']
if 'view' not in return_obj:
break
except Exception as e:
print(current_time(), '[get_products][request error]', url, e)
failed_times += 1
if failed_times == 3:
break
continue
failed_times = 0
try:
items = return_obj['view']['items']
except Exception as e:
break
for item in items:
base_info = {}
try:
base_info['title'] = item['t']
except:
base_info['title'] = '-'
try:
base_info['s_title'] = item['st']
except:
base_info['s_title'] = ''
try:
base_info['original_price'] = item['lp']
except:
base_info['original_price'] = '-'
try:
base_info['real_price'] = item['sp']
except:
base_info['real_price'] = '-'
base_info['code'] = item['c']
result.append(base_info)
print(current_time(), '[get_products]', 'Url', url, 'OK')
page += 1
return result
def get_ava_sku(item_id):
sku_str = "[]"
for i in range(3):
try:
url = 'https://www.adidas.com.cn/productGetItemIvts/{}.json?_={}'.format(
item_id, str(int(time.time() * 1000)))
req = build_request(url)
res_text = req.text
data = json.loads(res_text)
sku_str = data['skuStr']
break
except:
continue
result = json.loads(sku_str)
return result
def get_product_info(url):
req = build_request(url)
soup = BeautifulSoup(req.text, 'lxml')
item_id = soup.find("input", {"id": 'itemId'}).get("value")
color = soup.find("input", {'id': 'colorDisPaly'}).get('value')
try:
login_info=soup.find('div',{'class':'login-text'}).find('p').get_text()
except Exception as e:
login_info=''
table = soup.find('div', {'class': 'overview product-size'}).find_all("li")
product_size = []
for li in table:
display_size = li.get_text()
size_id = li.get('ipi')
product_size.append([size_id, display_size])
ava_list = get_ava_sku(item_id)
sku_info = []
for item in product_size:
for ava_sku in ava_list:
if item[0] in ava_sku['properties']:
sku_info.append([item[1], ava_sku['availableQty']])
break
return {
'color': color,
'login_info':login_info,
'sku_info': sku_info
}
class AdidasProduct(threading.Thread):
def __init__(self, base_info):
super(AdidasProduct, self).__init__()
self.base_info = base_info
self.pdp_url = self.base_info[-1]
def run(self):
try:
self.product = get_product_info(self.pdp_url)
except Exception as e:
print(current_time(),
'[get_product_info][error]', self.pdp_url, e)
self.product = {'color': '','login_info':'', 'sku_info': []}
self.lines = []
if len(self.product['sku_info']) == 0:
self.lines.append(self.base_info + [self.product['color'],self.product['login_info']])
else:
for sku_item in self.product['sku_info']:
line = self.base_info + [self.product['color'],self.product['login_info']] + sku_item
self.lines.append(line)
def load_products():
products = get_products()
keys = ['title', 's_title', 'original_price',
'real_price', 'code']
items = []
for product in products:
item = []
for key in keys:
value = product[key]
item.append(value)
item.append('https://www.adidas.com.cn/item/' + product['code'])
items.append(item)
if len(items) < 5:
continue
yield items
items = []
yield items
def crawl():
result = []
counter = 0
for products in load_products():
tasks = []
for item in products:
task = AdidasProduct(item)
tasks.append(task)
for task in tasks:
task.start()
for task in tasks:
task.join()
for task in tasks:
result += task.lines
counter += 1
print(current_time(),
'[get_product_info][OK]', task.pdp_url, counter)
current_dir = os.getcwd()
write_to_excel(result, current_dir+'/files/' +
current_time().replace(':', '_')+'_adidas' + '.xlsx')
crawl()
|
19js/Nyspider
|
www.adidas.com.cn/adidas.py
|
adidas.py
|
py
| 7,820 |
python
|
en
|
code
| 16 |
github-code
|
50
|
1376076143
|
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from gym import spaces
from gym_rlf.envs.rlf_env import RLFEnv, MIN_PRICE, MAX_PRICE
from gym_rlf.envs.Parameters import LotSize, TickSize, sigma, kappa, alpha, factor_alpha, factor_sensitivity, factor_sigma, p_e, M, K
# Stable Baselines recommends normalizing continuous action spaces because the Baselines
# agents only sample actions from a standard Gaussian.
# We use a space normalizer to rescale the action space to [-LotSize * K, LotSize * K].
ACTION_SPACE_NORMALIZER = LotSize * K
MAX_HOLDING = LotSize * M
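# As a sketch of the rescaling above (illustrative numbers only): an agent
# action of 0.5 maps to a trade of 0.5 * LotSize * K units of the security
# once multiplied by ACTION_SPACE_NORMALIZER in step() below.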
class APTEnv(RLFEnv):
def __init__(self):
super(APTEnv, self).__init__('apt_plots/')
# Use a Box to represent the action space with the first param being
# (trade of the security) and the second param being (trade of the factor security).
self.action_space = spaces.Box(
low=np.array([-1, -1]),
high=np.array([1, 1]),
shape=(2,))
# Use a Box to represent the observation space with params: (position of the security),
# (position of the factor security) and (price of the security).
# The price of the factor security is hidden.
self.observation_space = spaces.Box(
low=np.array([-MAX_HOLDING, -MAX_HOLDING, MIN_PRICE]),
high=np.array([MAX_HOLDING, MAX_HOLDING, MAX_PRICE]),
shape=(3,))
def _next_price(self, p, p_f):
rn1 = np.random.normal(0, 1., 1)[0]
rn2 = np.random.normal(0, 1., 1)[0]
factor_return = factor_alpha + factor_sigma * rn1
p_f_new = (1 + factor_return) * p_f
p_f_new = min(p_f_new, MAX_PRICE)
p_f_new = max(p_f_new, MIN_PRICE)
r = alpha + factor_sensitivity * factor_return + sigma * rn2
p_new = (1 + r) * p
p_new = min(p_new, MAX_PRICE)
p_new = max(p_new, MIN_PRICE)
return p_new, p_f_new
def reset(self):
super(APTEnv, self).reset()
self._factor_prices = np.zeros(self._L + 2)
self._factor_prices[0] = p_e
self._factor_positions = np.zeros(self._L + 2)
return self._get_state()
def _get_state(self):
return np.array([self._positions[self._step_counts],
self._factor_positions[self._step_counts],
self._prices[self._step_counts]])
def step(self, action):
ac1 = action[0] * ACTION_SPACE_NORMALIZER
ac2 = action[1] * ACTION_SPACE_NORMALIZER
old_pos = self._positions[self._step_counts]
old_factor_pos = self._factor_positions[self._step_counts]
old_price = self._prices[self._step_counts]
old_factor_price = self._factor_prices[self._step_counts]
self._step_counts += 1
new_pos = self._positions[self._step_counts] =\
max(min(old_pos + ac1, MAX_HOLDING), -MAX_HOLDING)
new_factor_pos = self._factor_positions[self._step_counts] =\
max(min(old_factor_pos + ac2, MAX_HOLDING), -MAX_HOLDING)
new_price, new_factor_price =\
self._prices[self._step_counts], self._factor_prices[self._step_counts] =\
self._next_price(old_price, old_factor_price)
trade_size = abs(new_pos - old_pos) + abs(new_factor_pos - old_factor_pos)
cost = TickSize * (trade_size + 1e-2 * trade_size**2)
PnL = (new_price - old_price) * old_pos + (new_factor_price - old_factor_price) * old_factor_pos - cost
self._costs[self._step_counts] = cost
self._profits[self._step_counts] = PnL + cost
self._rewards[self._step_counts] = PnL - .5 * kappa * PnL**2
return self._get_state(), self._rewards[self._step_counts], self._step_counts >= self._L + 1, {}
def render(self, mode='human'):
super(APTEnv, self).render()
t = np.linspace(0, self._L + 1, self._L + 2)
fig, axs = plt.subplots(5, 1, figsize=(16, 40), constrained_layout=True)
axs[0].plot(t, self._prices)
axs[1].plot(t, self._factor_prices)
axs[2].plot(t, self._positions)
axs[3].plot(t, self._factor_positions)
axs[4].plot(t, np.cumsum(self._rewards))
axs[0].set_ylabel('price')
axs[1].set_ylabel('factor price')
axs[2].set_ylabel('position')
axs[3].set_ylabel('factor position')
axs[4].set_ylabel('cumulative P/L')
fig.suptitle('Out-of-sample simulation of RL agent')
plt.xlabel('steps')
plt.savefig('{}/plot_{}.png'.format(self._folder_name, self._render_counts))
plt.close()
plt.plot(t, np.cumsum(self._costs), label='cumulative costs')
plt.plot(t, np.cumsum(self._profits), label='cumulative profits')
plt.legend()
plt.savefig('{}/costs_and_profits_plot_{}.png'.format(self._folder_name, self._render_counts))
plt.close()
|
sophiagu/RLF
|
gym-rlf/gym_rlf/envs/apt_env.py
|
apt_env.py
|
py
| 4,557 |
python
|
en
|
code
| 7 |
github-code
|
50
|
20548981126
|
def equilibrium(A):
for i in range(len(A)):
sl = 0
for il in range(i):
sl += A[il]
for ir in range(i+1, len(A)):
sl -= A[ir]
if sl == 0:
return i
return -1
def equilibrium_optimised(A):
    # single pass: move each element from the right-hand sum to the left-hand sum;
    # returns a 0-based index, consistent with equilibrium() above
    left_sum = 0
    right_sum = sum(A)
    for idx in range(len(A)):
        right_sum -= A[idx]
        if left_sum == right_sum:
            return idx
        left_sum += A[idx]
    return -1
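# A quick check of both implementations on a sample array (values chosen for
# illustration): index 3 balances [1, 2, 3, 4, 3, 2, 1] since 1+2+3 == 3+2+1.
assert equilibrium([1, 2, 3, 4, 3, 2, 1]) == 3
assert equilibrium_optimised([1, 2, 3, 4, 3, 2, 1]) == 3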
A = []
print(equilibrium_optimised(A))
|
shivang98/DS-Algo-Practice
|
equilibrium_index.py
|
equilibrium_index.py
|
py
| 573 |
python
|
en
|
code
| 0 |
github-code
|
50
|
30913849710
|
# Ask for a number and return the prime numbers from 0 up to the number entered by the user
def numeros_primos(num):
    for i in range(2, num - 1):
        if num % i == 0: return False
    return True
def primos_hasta(num):
    primos = []
    # start at 2 so the first prime is not skipped
    for i in range(2, num + 1):
        resultado = numeros_primos(i)
        if resultado: primos.append(i)
    return primos
resultado = primos_hasta(39)
print(resultado)
|
fedemoretto11/apuntes-python
|
Ejercicios practicos 2/ejercicio_practico_2.py
|
ejercicio_practico_2.py
|
py
| 425 |
python
|
es
|
code
| 1 |
github-code
|
50
|
21078475346
|
from flask import Blueprint, Flask, redirect, render_template, request
import repositories.human_repository as human_repo
import repositories.zombie_repository as zombie_repo
import repositories.biting_repository as biting_repo
from models.biting import Biting
bitings_blueprint = Blueprint("bitings", __name__)
# INDEX
@bitings_blueprint.route("/bitings")
def bites():
bitings = biting_repo.select_all()
return render_template("bitings/index.html", bitings=bitings)
# NEW
@bitings_blueprint.route("/bitings/new")
def new_bite():
humans = human_repo.select_all()
zombies = zombie_repo.select_all()
return render_template("bitings/new.html", humans=humans, zombies=zombies)
# CREATE
@bitings_blueprint.route("/bitings", methods=["POST"])
def create_bite():
human = request.form["human_id"]
zombie = request.form["zombie_id"]
humans = human_repo.select(human)
zombies = zombie_repo.select(zombie)
bite = Biting(humans, zombies)
biting_repo.save(bite)
return redirect("/bitings")
# EDIT
@bitings_blueprint.route("/bitings/<id>/edit")
def edit_bite(id):
humans = human_repo.select_all()
zombies = zombie_repo.select_all()
biting = biting_repo.select(id)
return render_template('bitings/edit.html', humans=humans, zombies=zombies, biting=biting)
# UPDATE
@bitings_blueprint.route("/bitings/<id>", methods=["POST"])
def update_bite(id):
human = request.form["human_id"]
zombie = request.form["zombie_id"]
humans = human_repo.select(human)
zombies = zombie_repo.select(zombie)
bite = Biting(humans, zombies, id)
biting_repo.update(bite)
return redirect("/bitings")
# DELETE
@bitings_blueprint.route("/bitings/<id>/delete", methods=["POST"])
def delete_bite(id):
biting_repo.delete(id)
return redirect("/bitings")
|
fionaberkery/zombie_land
|
controllers/bitings_controller.py
|
bitings_controller.py
|
py
| 1,896 |
python
|
en
|
code
| 0 |
github-code
|
50
|
36106666135
|
# remove duplicate elements from a list
list_l=[1,3,7,5,6,4,7,8,6,4]
list_set=set(list_l)
print(list(list_set))
# dynamic
list_l=[]
list_b=int(input())
for i in range(list_b):
list_c=int(input("enter the values"))
list_l.append(list_c)
print(list_l)
# read N lines of input and create a nested list with each line as a list (values hard-coded below)
list_a=[1,2,3]
list_b=[4,5,6]
list_c=[6,7,3]
list_d=[list_a, list_b, list_c]
print(list_d)
# Min and Max values in the list of Tuples
tuple_1=(22,13,24,15)
tuple_2=(23,45,67,46)
list_tup=list(tuple_1)
list_tup1=list(tuple_2)
all_values = list_tup + list_tup1
print("Max Number:", max(all_values))
print("Min Number:", min(all_values))
|
mathekeerthana/Capstone
|
Lists.py
|
Lists.py
|
py
| 768 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38139517895
|
import tkinter as tk
from PIL import Image, ImageTk
from lib.modString import addString, minusString
class drop:
"""
@ parent: frame that the "drop" is on
@ name: string representing the drop's name
@ raid_boss: the boss that drops 'drop'
@ r, c: position in the grid
@ cur count
@ total count
########################################
@ parent: frame
@ name: string, name of item drop
@ raid_boss: boss class, name of boss that this drop belongs to
@ cur_count: string, current count
@ total_count: string, total
@ label: label, total(current) displayed
"""
def __init__(self, parent, name, raid_boss, r, c, total='0', cur='0'):
self.parent = parent
self.name = name
self.raid_boss = raid_boss
self.cur_count = cur
self.total_count = total
self.label = tk.Label(parent, text=self.paint(), font=("Arial", 12))
image = Image.open("img/"+raid_boss.name+"/"+self.name+".jpg")
photo = ImageTk.PhotoImage(image)
l = tk.Label(parent, image=photo)
l.image = photo
l.grid(row=r, column=c)
l.bind('<Button-1>', self.increment)
l.bind('<Button-3>', self.decrement)
self.label.grid(row=r+1, column=c)
def increment(self, event=None):
self.cur_count = addString(self.cur_count, '1')
self.total_count = addString(self.total_count, '1')
self.label['text'] = self.paint()
self.raid_boss.total = addString(self.raid_boss.total, '1')
self.raid_boss.cur = addString(self.raid_boss.cur, '1')
self.raid_boss.count_label['text'] = self.raid_boss.paint()
def paint(self):
return self.cur_count+'('+self.total_count+')'
def decrement(self, event=None):
if self.cur_count == '0' or self.total_count == '0':
return
self.cur_count = minusString(self.cur_count, '1')
self.total_count = minusString(self.total_count, '1')
self.label['text'] = self.paint()
self.raid_boss.total = minusString(self.raid_boss.total, '1')
self.raid_boss.cur = minusString(self.raid_boss.cur, '1')
self.raid_boss.count_label['text'] = self.raid_boss.paint()
|
villestring/GBF-Blue-chest-counter
|
lib/drop.py
|
drop.py
|
py
| 1,993 |
python
|
en
|
code
| 0 |
github-code
|
50
|
21943173149
|
from datetime import datetime
class ParserBS():
item_order = 1
current_page = 1
sequential_errors = 0
def get_all_specifications(self, page, url):
specification_elements = page.select('div#detailSpecContent div#Specs fieldset dl')
specifications = dict()
try:
for element in specification_elements:
obj = self.get_field_set_content(element)
specifications.update(obj)
specifications.update({
'platform_id': self.get_platform_id(url),
'images_urls': self.get_images(page),
'url': url,
'platform': 'NewEgg',
'item_order': self.item_order,
'crawled_date': datetime.now().strftime("%d/%m/%Y %H:%M:%S")
})
except:
if self.sequential_errors < 10:
self.sequential_errors = self.sequential_errors + 1
print('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
print('Error when Crawling Component : ' + str(self.item_order-1))
print('Component Url: ' + url)
print('Component Current Page: ' + str(self.current_page))
self.get_all_specifications(page, url)
else:
exit()
if self.sequential_errors > 0:
self.sequential_errors = 0
print('======================================')
print('Crawled Components: ' + str(self.item_order))
self.item_order = self.item_order + 1
return specifications
def get_field_set_content(self, element):
titleElement = element.select('dt a')
title = ''
if len(titleElement) == 0:
title = element.select('dt')[0].getText()
else:
title = titleElement[0].getText()
content = element.select('dd')[0].getText()
obj = dict()
obj.update({
title: content
})
return obj
def get_maximum_page(self, page):
maximum_page = page.select('span.list-tool-pagination-text strong')[0].text.split('/')[1]
self.maximum_page = int(maximum_page)
return self.maximum_page
def get_platform_id(self, url):
splitted = url.split('/')
return splitted[len(splitted)-1]
def get_images(self, response):
images_elements = response.select('div.objImages ul.navThumbs img')
images = []
for image_element in images_elements:
images.append(image_element.attrs['src'].replace('CompressAll35', ''))
return images
def get_all_components(self, response):
components = response.select('.items-view>.item-container:not(.is-feature-item)')
print('======================================')
print('Page Components: ' + str(len(components)))
return components
def get_url_from_component(self, response):
return response.select('a.item-title')[0].attrs['href']
def get_next_page_url(self, url):
next_page_url = url.replace(f'Page-{self.current_page}', f'Page-{self.current_page+1}')
self.next_page(next_page_url)
return next_page_url
def next_page(self, next_page_url):
self.current_page = self.current_page + 1
print('######################################')
print('######### Crawling Next Page: ' + str(self.current_page))
print('######### Next Page Url: ' + next_page_url)
print('######################################')
|
eduardosbcabral/HWParts-Crawler
|
beautiful_soup/parser_bs.py
|
parser_bs.py
|
py
| 3,566 |
python
|
en
|
code
| 0 |
github-code
|
50
|
74417402076
|
import matplotlib.pyplot as plt, numpy as np, pandas as pd
# general functions for plotting
# Tim Tyree
# 7.23.2021
def PlotTextBox(ax,text,text_width=150.,xcenter=0.5,ycenter=0.5,fontsize=20, family='serif', style='italic',horizontalalignment='center',
verticalalignment='center', color='black',use_turnoff_axis=True,**kwargs):
txt=ax.text(xcenter,ycenter,text,horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment, transform = ax.transAxes, fontsize=fontsize, color='black', wrap=True,**kwargs)
txt._get_wrap_line_width = lambda : text_width
if use_turnoff_axis:
ax.axis('off')
def text_plotter_function(ax,data):
text=data
# ax.text(0.5, 0.5, text, family='serif', style='italic', ha='right', wrap=True)
PlotTextBox(ax,text,fontsize=10)
return True
def format_plot_general(**kwargs):
return format_plot(**kwargs)
def format_plot(ax=None,xlabel=None,ylabel=None,fontsize=20,use_loglog=False,xlim=None,ylim=None,use_bigticks=True,**kwargs):
'''format plot formats the matplotlib axis instance, ax,
performing routine formatting to the plot,
labeling the x axis by the string, xlabel and
labeling the y axis by the string, ylabel
'''
if not ax:
ax=plt.gca()
if use_loglog:
ax.set_xscale('log')
ax.set_yscale('log')
if xlabel:
ax.set_xlabel(xlabel,fontsize=fontsize,**kwargs)
if ylabel:
ax.set_ylabel(ylabel,fontsize=fontsize,**kwargs)
if use_bigticks:
ax.tick_params(axis='both', which='major', labelsize=fontsize,**kwargs)
ax.tick_params(axis='both', which='minor', labelsize=0,**kwargs)
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
return True
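# Typical usage (illustrative sketch, not part of the original module):
#   fig, ax = plt.subplots()
#   ax.plot([1, 10, 100], [1, 100, 10000])
#   format_plot(ax=ax, xlabel='x', ylabel='y', use_loglog=True, xlim=(1, 100))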
def FormatAxes(ax,x1label,x2label,title=None,x1lim=None,x2lim=None,fontsize=16,use_loglog=False,**kwargs):
if x1lim is not None:
ax.set_xlim(x1lim)
if x2lim is not None:
ax.set_ylim(x2lim)
if title is not None:
ax.set_title(title,fontsize=fontsize)
format_plot(ax, x1label, x2label, fontsize=fontsize, use_loglog=use_loglog,**kwargs)
return True
def plot_horizontal(ax,xlim,x0,Delta_thresh=1.,use_Delta_thresh=False):
#plot the solid y=0 line
x=np.linspace(xlim[0],xlim[1],10)
ax.plot(x,0*x+x0,'k-')
if use_Delta_thresh:
#plot the dotted +-Delta_thresh lines
ax.plot(x,0*x+Delta_thresh+x0,'k--',alpha=0.7)
ax.plot(x,0*x-Delta_thresh+x0,'k--',alpha=0.7)
return True
|
timtyree/bgmc
|
python/lib/viewer/bluf/plot_func.py
|
plot_func.py
|
py
| 2,515 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22541760133
|
'''
Re-crawl, during the following week, the airports that had no flight information last week
'''
import codecs
import pandas as pd
import csv
import requests
import re
import json
import pymysql as py
def readCSV2List(filePath):
try:
file=open(filePath,'r',encoding="gb18030")  # open with gb18030 encoding
context = file.read()  # read the whole file into a str
list_result=context.split("\n")  # split on newlines into individual lines
# the elements of each line are comma-separated, so each line can be split
length=len(list_result)
for i in range(length):
list_result[i]=list_result[i].split(",")
return list_result
except Exception :
print("文件读取转换失败,请检查文件路径及文件编码是否正确")
finally:
file.close()  # always close the file when done
list1 = readCSV2List(r'D:\no.csv')
list3 = []
def flg_code():
list2 = list1[1:-1]
for i in list2:
list3.append(i[2])
flg_code()
def getConnection():
return py.connect(host='localhost', user='root', password='hh226752',db = 'flightradar24', charset = 'utf8' )
# load data from the database
conn = getConnection()
cur = conn.cursor()
cn = cur.execute('select * from test')
rows = cur.fetchall()
rows = list(rows)
airport_info = []
for i in rows:
list1 = list(i)
airport_info.append(list1)
url = 'https://www.flightradar24.com/data/airports/{}/routes'
None_info_Airport=[]
to_csv = []
for i in list3:
try:
new_url = url.format(i)
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'}
response = requests.get(url=new_url, headers=headers,verify=True)
html = response.text
L = re.search('arrRoutes=\[(.*?)\]', html, re.S).group(1)
ss = re.findall('{.*?}', L)
for j in ss:
l = json.loads(j)
flg_code = l['iata']  # airport code
flg_name = l['name']  # airport name
flg_city = l['city']  # city the airport is in
flg_country = l['country']  # country the airport is in
flg_lat = l['lat']  # airport latitude
flg_lon = l['lon']  # airport longitude
list2 = [i,flg_country,flg_name,flg_code,flg_city,flg_lat,flg_lon]
to_csv.append(list2)
print(flg_country,flg_code,flg_name,flg_city,flg_lat,flg_lon)
except AttributeError:
None_info_Airport.append(i)
#print('Airports with no route info:', i)
# to_csv = []
# for i in airport_info:
# for k in list3:
# if k in i:
# to_csv.append(i[1:])
# print(to_csv)
#
# newly added airport data (seven columns to match the seven fields collected above)
name = ['origin_airport_code','country','airport_name','airport_code','city','latitude','longitude']
test = pd.DataFrame(columns=name, data=to_csv)
test.to_csv('D:\新增机场信息列表.csv')
# After building the list, look up each element in the master table, then write the results to a new csv or the mysql database
# The list holds 499 entries: select * from test where airports_code = '?'
# Table test, column airports_code; use the elements of list1 to fetch the full info for each airport_code
|
kidword/spider
|
机场抓取信息/检查.py
|
检查.py
|
py
| 3,166 |
python
|
en
|
code
| 2 |
github-code
|
50
|
42198469647
|
#!/usr/bin/env python3
import sys
import os.path
import re
read_mapped = 0
total_reads = 0
def load_annotations(infile):
ret = {}
with open(infile, 'r') as f:
data = f.read().split('\n')
for line in data:
if not line:
continue
entries = line.split(',')
ret.setdefault(entries[0], entries[1:])
return ret
def load_resfams_metadata(infile):
ret = {}
with open(infile, 'r') as f:
data = f.read().split('\n')[1:]
for line in data:
if not line:
continue
entry = line.split('\t')
ret[entry[0]] = entry[1]
return ret
def load_domains(infile):
ret = {}
with open(infile, 'r') as f:
data = f.read().split('\n')
for line in data:
if line.startswith('#') or not line:
continue
entry = line.split()
contig = entry[-3].replace('>', '')
locs = [int(x) for x in entry[0].split('|')[-2:]]
ret.setdefault(contig, []).append((int(locs[0]), int(locs[1]), entry[3]))
return ret
def parse_cigar(s):
length = 0
ret = re.findall(r'(\d+)([A-Z=]{1})', s)
universe = {'X', 'P', 'I', 'N', 'D', '=', 'M'}
for occ, op in ret:
if op in universe:
length += int(occ)
return length
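# Sanity check (illustrative): '10M2I3D' sums the M, I and D operation lengths,
# all of which are in the universe above, giving 10 + 2 + 3 = 15.
assert parse_cigar('10M2I3D') == 15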
class SamParser:
"""This object takes as input a SAM file path and constructs an iterable that outputs
hash-mapping of header to sequence information. Only one line will be held in memory at a time using this method.
"""
def __init__(self, filepath):
"""
constructor
@param filepath: filepath to the input raw SAM file.
"""
if os.path.exists(filepath): # if file is a file, read from the file
self.sam_file = str(filepath)
self.stdin = False
elif not sys.stdin.isatty(): # else read from standard in
self.stdin = True
else:
raise ValueError("Parameter filepath must be a SAM file")
self.current_line = None
self.reads_mapping = 0
self.reads_total = 0
self.header_lens = {}
def __iter__(self):
return self
@property
def _iterate(self):
# Skip all leading whitespace
while True:
if self.stdin:
sam_line = sys.stdin.readline() # read from stdin
else:
sam_line = self.sam_file.readline() # read from file
if not sam_line:
return # End of file
if sam_line[0] != '@': # these lines are the actual reads
self.reads_total += 1
temp = sam_line.split()
if (int(temp[1]) & 4) == 0:
self.reads_mapping += 1
return temp[2], temp[0], int(temp[3]), temp[5] # RefName, header, 1-start, CIGAR
self.sam_file.close() # catch all in case this line is reached
assert False, "Should not reach this line"
def __next__(self):
if not self.stdin and type(self.sam_file) is str: # only open file here if sam_file is a str and not fileIO
self.sam_file = open(self.sam_file, "r")
value = self._iterate
if not value: # close file on EOF
if not self.stdin:
self.sam_file.close()
global reads_mapped
global total_reads
reads_mapped = self.reads_mapping
total_reads = self.reads_total
raise StopIteration()
else:
return value
if __name__ == '__main__':
counts = {}
output_annot = ""
D = load_domains(sys.argv[1])
A = load_annotations(sys.argv[2])
R = load_resfams_metadata(sys.argv[3])
total = sys.argv[4]
param_string = sys.argv[1].split('/')[-2].replace('_', ',')
for refname, simulated, start, cigar in SamParser('-'):
if simulated[0:2] == 'gi':
continue
# RefName, 1-start, CIGAR, RefLen, ReadSeq
stop = start + parse_cigar(cigar) - 1
header = '-'.join(simulated.split('-')[:-2])
class_annot = A[header][0]
output_annot = class_annot
if refname not in D:
continue
model_hits = set()
for triplets in D[refname]:
if max(start, triplets[0]) <= min(stop, triplets[1]):
if R[triplets[2]] != 'NA':
model_hits.add(triplets[2])
if model_hits:
for target in model_hits:
class_target = R[target]
if class_annot == class_target:
counts.setdefault(simulated, 0)
if counts[simulated] < 2:
counts[simulated] += 1
sys.stdout.write('resfams,{},{},{},{},{}\n'.format(
param_string,
0,
output_annot,
str(sum([y for y in counts.values()])),
total
))
|
lakinsm/meta-marc-publication
|
scripts/count_num_classified_resfams.py
|
count_num_classified_resfams.py
|
py
| 4,101 |
python
|
en
|
code
| 1 |
github-code
|
50
|
37324197795
|
from turtle import*
def drSquare(le, color):
shape("turtle")
pencolor(color)
for i in range(4):
forward(le)
left(90)
# mainloop()
# drSquare(100,"red")
for i in range(30):
drSquare(i * 5, 'red')
left(17)
penup()
forward(i * 2)
pendown()
|
huyhieu07/nguyenhuyhieu-c4e-16-labs-
|
lab03/nhap.py
|
nhap.py
|
py
| 293 |
python
|
en
|
code
| 0 |
github-code
|
50
|
43598959090
|
from flask import Flask,request,abort
import dataset
import json
import datetime
app=Flask(__name__)
db = dataset.connect('sqlite:///data/nobel_winners.db')
@app.route('/api/winners')
def get_country_data():
print('Request args:'+str(dict(request.args)))
query_dict={}
for key in ['country','category','year']:
arg=request.args.get(key)
if arg:
query_dict[key]=arg
winners = list(db['winners'].find(**query_dict))
if winners:
return dumps(winners)
abort(404)
class JSONDateTimeEncoder(json.JSONEncoder):
def default(self,obj):
if(isinstance(obj,(datetime.date,datetime.datetime))):
return obj.isoformat()
else:
return json.JSONEncoder.default(self,obj)
def dumps(obj):
return json.dumps(obj,cls=JSONDateTimeEncoder)
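# Example (illustrative): dates are serialized as ISO-8601 strings, e.g.
#   dumps({'year': datetime.date(2020, 1, 1)}) == '{"year": "2020-01-01"}'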
if __name__=='__main__':
app.run(port=8000,debug=True)
|
nationcall/dataviz
|
D3/data_viz_JS_py/flask_serve/server_sql.py
|
server_sql.py
|
py
| 804 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18259038033
|
#from player import Player
#tim = Player("Tim")
from enemy import Enemy , Troll, Vampyre, Vampyreking
dracula = Vampyreking("Dracula")
print(dracula)
dracula.take_damage(12)
print(dracula)
#random_monster = Enemy("Basic Enemy",12,1)
#print(random_monster)
#random_monster.take_damage(4)
#print(random_monster)
#random_monster.take_damage(8)
#print(random_monster)
#random_monster.take_damage(9)
#print(random_monster)
print("*********************************** \n \n")
ugly_troll = Troll("pug")
print("ugly troll-{}".format(ugly_troll))
another_troll = Troll("UG")
print("Another troll-{}".format(another_troll))
another_troll.take_damage(10)
#brother = Troll("Urg")
#print(brother)
ugly_troll.grant()
vamp = Vampyre("vald")
print(vamp)
vamp.take_damage(5)
#while vamp.alive:
# vamp.take_damage(1)
# print(vamp)
|
sarangp323/counting_freq
|
python_oops/inherit.py
|
inherit.py
|
py
| 892 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18161154203
|
from config_joker import Config, JsonFileSource
def example():
config = Config(
sources=[
JsonFileSource(
file_path='./examples/json/config.json',
config_path='external_config_key[0].config'
)
]
)
print(config.required(key='external_key[0]key'))
if __name__== '__main__':
example()
|
joaopedromgoulart/config-joker
|
examples/json/example_config_json.py
|
example_config_json.py
|
py
| 373 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40775602339
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 14:34:06 2021
@author: bhs89
"""
import turtle
import random
turtle.clearscreen()
tt = turtle.Turtle()
scr = turtle.Screen()
image1 = 'muji.gif'
image2 = 'brown-line.gif'
image3 = 'kakao_lion.gif'
scr.addshape(image1)
scr.addshape(image2)
scr.addshape(image3)
t1 = turtle.Turtle()
t1.shape(image1)
t1.penup()
t1.goto(-300,300)
t1.pendown()
t2 = turtle.Turtle()
t2.shape(image2)
t2.penup()
t2.goto(-300,0)
t2.pendown()
t3 = turtle.Turtle()
t3.shape(image3)
t3.penup()
t3.goto(-300,-300)
t3.pendown()
for i in range(100):
speed1 = random.randint(1, 10)
t1.forward(speed1)
speed2 = random.randint(1, 10)
t2.forward(speed2)
speed3 = random.randint(1, 10)
t3.forward(speed3)
turtle.done()
|
Bae-hong-seob/2021-2-University_2_2
|
빅데이터언어/실습/racing.py
|
racing.py
|
py
| 815 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22497232452
|
# -*- coding: utf-8 -*-
import logging
import math
import os
import random
import time
import urllib.parse
import urllib.request
from collections import Counter
import requests
os.makedirs("logs", exist_ok=True)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 5.1; rv:14.0) Gecko/20100101 Firefox/14.0.1',
'Referer': 'http://google.com'
}
requests_session = requests.Session()
requests_session.headers.update(headers)
quotes_file = open('raisin/quotes', 'r')
quotes = quotes_file.readlines()
quote_index = 0
random.shuffle(quotes)
def random_quote(sender):
global quote_index
reply = quotes[quote_index].strip('\r\n')
quote_index += 1
if '/me' in reply:
reply = '\x01%s\x01' % reply
return reply.replace('%s', sender).replace('/me', 'ACTION')
def flatten(l):
return [item for sublist in l for item in sublist]
def pastebin(text):
    # [!] Assumes text is a str; the urlencoded body must be bytes for urlopen
    # (urllib.parse/urllib.request replace the old Python 2 urllib calls)
    params = urllib.parse.urlencode({'api_dev_key': '07a1c8f8a60611c983b2345ea38c1123', 'api_paste_code': text, 'api_option': 'paste'}).encode('utf-8')
    paste = urllib.request.urlopen('http://pastebin.com/api/api_post.php', params).read().decode('utf-8')
    return paste.replace('.com/', '.com/raw.php?i=')
def sprunge(text):
    # [!] Assumes text is a str; the urlencoded body must be bytes for urlopen
    params = urllib.parse.urlencode({'sprunge': text}).encode('utf-8')
    paste = urllib.request.urlopen('http://sprunge.us', params).read().decode('utf-8')
    return paste.lstrip(' ')
def is_number(message):
return message.replace('.', '', 1).isdigit()
# Shannon entropy
def entropy(message):
counts = Counter(message)
l = len(message)
return -sum(count / l * math.log(count / l, 2) for count in counts.values())
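# Illustrative values: a uniform 4-symbol string carries 2 bits per symbol,
# while a constant string carries none.
assert abs(entropy('abcd') - 2.0) < 1e-9
assert entropy('aaaa') == 0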
def logger(name):
console_formatting_string = "%(asctime)s %(name)s: %(message)s"
if name in ("bot", "parser"):
console_formatting_string = "%(asctime)s %(message)s"
console_formatter = logging.Formatter(console_formatting_string)
console_handler = logging.StreamHandler()
console_handler.setFormatter(console_formatter)
console_handler.setLevel(logging.INFO)
file_formatter = logging.Formatter("%(asctime)s %(message)s")
file_handler = logging.FileHandler(f"logs/{name}.log")
file_handler.setFormatter(file_formatter)
file_handler.setLevel(logging.DEBUG)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
|
superseal/raisin
|
raisin/utils.py
|
utils.py
|
py
| 2,483 |
python
|
en
|
code
| 0 |
github-code
|
50
|
24314432477
|
import os
import torch
import pickle as pkl
from collections import Counter
from torchtext.vocab import Vocab
from eval import Model, load_checkpoint, load_vocabs
from model import get_pbg, save_vocab
BASE_PATH = './models/unified'
def unify_ents(e1, e2):
e = set(e1).union(set(e2))
e.remove('<unk>')
e.remove('<pad>')
return e
def create_vocabs(ents, rel_vocab):
ent_vocab = Vocab(Counter(ents))
vectors = get_pbg(ents, '../../embeddings', 'unified_embs.txt')
ent_vocab.set_vectors(vectors.stoi, vectors.vectors, vectors.dim)
save_vocab(os.path.join(BASE_PATH, 'vocab.pkl'), ent_vocab, rel_vocab)
def combine_models(model1, model2, beta=0.5):
params1 = dict(model1)
params2 = dict(model2)
for name in params2.keys():
if name in params1:
if name == 'ent_embedding.weight' or name == 'rel_embedding.weight':
continue
print('Combining layer {}... {} {}'.format(name, params1[name].data.size(), params2[name].data.size()))
params1[name].data.copy_((1-beta)*params2[name].data + beta*params1[name].data)
return params1
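# Sketch of the blend rule above on hypothetical one-layer state dicts
# (assumed names and shapes, for illustration only):
#   a = {'fc.weight': torch.ones(2, 2)}
#   b = {'fc.weight': torch.zeros(2, 2)}
#   blended = combine_models(a.items(), b.items(), beta=0.75)
#   # blended['fc.weight'] is 0.75 * a + 0.25 * b == 0.75 everywhere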
if __name__ == '__main__':
model_names = ['wiki_gold_1M', 'gold_openie_50k_docs_balanced', 'openie_50k_docs']
model_paths = ['./models/{}'.format(name) for name in model_names]
wikidata, combined, wikipedia = [load_checkpoint(os.path.join(path, 'best_model.pt')) for path in model_paths]
(ent_wikidata, rel_wikidata), (_, _), (ent_wikipedia, rel_wikipedia) = [load_vocabs(os.path.join(path, 'vocab.pkl')) for path in model_paths]
state_dict = combine_models(wikidata.state_dict(), combined.state_dict(), beta=0.8)
state_dict = combine_models(state_dict, wikipedia.state_dict(), beta=0.5)
state_dict['ent_embedding.weight'] = wikipedia.state_dict()['ent_embedding.weight']
state_dict['rel_embedding.weight'] = wikipedia.state_dict()['rel_embedding.weight']
umodel = Model(200, len(ent_wikipedia), len(rel_wikipedia), 200)
umodel.load_state_dict(state_dict)
torch.save(umodel.state_dict(), os.path.join(BASE_PATH, 'best_model.pt'))
|
rahular/coref-rl
|
wiki/reward/combine_models.py
|
combine_models.py
|
py
| 2,120 |
python
|
en
|
code
| 9 |
github-code
|
50
|
39291268405
|
import dash
from dash import dcc, html
import dash_bootstrap_components as dbc
app = dash.Dash(__name__,external_stylesheets=[dbc.themes.SLATE],use_pages=True)
server=app.server
app.config.suppress_callback_exceptions=True
sidebar=dbc.Nav(
[
dbc.NavLink(
[
html.Div(page["name"],className="ms-2",style={'textAlign' : 'center', 'color':'linen'}),
],
href=page["path"],
active="exact",
)
for page in dash.page_registry.values()
],
vertical=True,
pills=True,
className="btn-outline-light",
)
app.layout = dbc.Container([
dbc.Row([
dbc.Col(html.Div("Roasting Terminal",
style={'fontSize':50,'textAlign' : 'center','color' : 'linen'},
className='text-m-center mb-m-4')),
]),
# html.Div([
# dcc.Link(page['name']+" | ",href=page['path'])
# for page in dash.page_registry.values()
# ]),
html.Hr(),
dbc.Row(
[
dbc.Col([
sidebar
],xs=4,sm=4,md=2,lg=2,xl=2,xxl=2),
dbc.Col(
[
dash.page_container
],xs=8,sm=8,md=10,lg=10,xl=10,xxl=10)
]
)
], fluid=True)
if __name__ == '__main__' :
app.run_server(debug=True)
|
DuaneIndustries/CoffeeRoasteryDash
|
app.py
|
app.py
|
py
| 1,355 |
python
|
en
|
code
| 0 |
github-code
|
50
|
32115819718
|
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class Main(QMainWindow):
def __init__(self, parent = None):
QMainWindow.__init__(self, parent)
self.InitUi()
def InitUi(self):
ql = QLabel(self)
ql.setText("<font color=\"blue\">Hello</font><font color=\"red\">Hello</font>")
def main():
app = QApplication(sys.argv)
main_window = Main()
main_window.show()
app.exec_()
if __name__ == "__main__":
main()
|
MinTimmy/Data_Structure
|
First_semester/Demo1/all/test7.py
|
test7.py
|
py
| 522 |
python
|
en
|
code
| 0 |
github-code
|
50
|
23889431271
|
#!/usr/bin/env python2.7
from __future__ import print_function
import sys, os, glob, logging
from argparse import ArgumentParser
from BaseSpacePy.api.BaseSpaceAPI import BaseSpaceAPI
from BaseSpacePy.model.QueryParameters import QueryParameters as qp
list_options = qp({'Limit': 1024})
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
)
bs = BaseSpaceAPI()
user = bs.getUserById('current')
logging.info("User Name: %s", user)
projects = bs.getProjectByUser(list_options)
project_list = [project.Name for project in projects]
cli = ArgumentParser()
cli.add_argument('project', nargs='?', help='Which project to download files from. When not specified, list projects instead.')
cli.add_argument('--dry-run', '-n', action='store_true', help='Only show which files would be downloaded without downloading them.')
cli.add_argument('--dir', '-d', default='.', help='Directory to download samples to.')
args = cli.parse_args()
if not args.project:
print(*project_list, sep='\n')
sys.exit(0)
p = args.project
try:
idx = project_list.index(p)
project = projects[idx]
except ValueError:
logging.error(
'%r is not in your projects. Available projects are:\n%s',
p, '\n'.join(project_list),
)
sys.exit(1)
# get already downloaded fastq
downloaded = {f.split('/')[-1] for f in glob.glob(args.dir + '/*fastq.gz')}
logging.info("Retrieving samples from project %s", p)
samples = project.getSamples(bs, list_options)
logging.info("Samples for this project: %s", samples)
for sample in samples:
logging.info("Retrieving files in sample %s", sample)
for f in sample.getFiles(bs):
if f.Name not in downloaded:
logging.info("Downloading file %s", f.Name)
if args.dry_run:
continue
f.downloadFile(bs, args.dir)
|
Teichlab/basespace_fq_downloader
|
download_fq_from_basespace.py
|
download_fq_from_basespace.py
|
py
| 1,886 |
python
|
en
|
code
| 4 |
github-code
|
50
|
18356819426
|
import pathlib
import pytest
BASE_PATH = pathlib.Path('docssrc/source/')
def plot(path):
_path = BASE_PATH / path
name = _path.name
_path = _path.parent
def wraps(fn):
@pytest.mark.skipif(
(_path / (name + '.png')).exists()
and (_path / (name + '.svg')).exists(),
reason=f'Output plot already exists, {_path / name}'
)
def inner(*args, **kwargs):
fig = fn(*args, **kwargs)
_path.mkdir(exist_ok=True, parents=True)
fig.savefig(str(_path / (name + '.png')))
fig.savefig(str(_path / (name + '.svg')))
return inner
return wraps
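# Usage sketch (hypothetical test, names assumed): the decorated test saves its
# returned figure as both .png and .svg, and is skipped once both files exist.
#   @plot('intro/example')
#   def test_intro_plot():
#       fig, ax = plt.subplots()
#       ax.plot([0, 1], [0, 1])
#       return fig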
|
Peilonrayz/dice_stats
|
docssrc/source/_plots/env.py
|
env.py
|
py
| 665 |
python
|
en
|
code
| 3 |
github-code
|
50
|
28077578353
|
# int, float, str, bool
# int -> str; str -> int
# float -> str; str -> float
j = 7
k = str(j)
a = float(input())
b = float(input())
print(a + b, a - b, a * b, a / b, a ** b)
c = 7
d = 8.4
print(c + d, type(c + d))
# bool: True, False
# everything empty and everything equal to 0 => False, everything else is True
print(bool("123"), bool(""), bool(123), bool(0.0))
print(bool("0"))
|
GerasimovRM/MMSP
|
lesson2/1.py
|
1.py
|
py
| 401 |
python
|
ru
|
code
| 0 |
github-code
|
50
|
25192773855
|
from flask import Blueprint, jsonify, request
from models import Departement, Role, RoleSchema, User, db
#blueprint setup
role = Blueprint('role',__name__)
@role.route('/AddRole', methods = ['POST'])
def AddRole():
req_Json = request.json
name = req_Json['name']
nameDepartement= req_Json['nameDepartement']
departement = Departement.query.filter_by(name=nameDepartement).first()
idDepartement = departement.id
role = Role(name,idDepartement,nameDepartement)
try:
db.session.add(role)
db.session.commit()
except Exception:
return "0" #Name already used
return "1" #Add successfully
@role.route('/UpdateRole/<int:_id>', methods = ['PUT'])
def UpdateRole(_id):
req_Json = request.json
role = Role.query.get(_id)
users = User.query.filter_by(role = role.name)
role.name = req_Json['name']
role.nameDepartement= req_Json['nameDepartement']
try:
for u in users:
u.role = role.name
departement = Departement.query.filter_by(name=role.nameDepartement).first()
role.idDepartement = departement.id
db.session.commit()
except Exception:
return "0" #Name already used
return '1' #Role updated !!
@role.route('/GetAllRole', methods = ['GET'])
def GetAllRole():
roles = Role.query.all()
role_schema = RoleSchema(many=True)
output = role_schema.dump(roles)
return jsonify({'Roles' : output})
@role.route('/GetListRoleByDepartement/<string:_DepartementName>', methods = ['GET'])
def GetListRoleByDepartement(_DepartementName):
roles = Role.query.filter(
Role.nameDepartement == _DepartementName
)
role_schema = RoleSchema(many=True)
output = role_schema.dump(roles)
return jsonify({'Roles' : output})
@role.route('/DeleteRole/<int:_id>', methods = ['DELETE'])
def DeleteRole(_id):
role = Role.query.get(_id)
users = User.query.filter_by(role = role.name)
for u in users:
u.role = "Null"
db.session.delete(role)
db.session.commit()
return '1' #Role deleted !!
|
sofieneMoka/GED_APP_BACKEND
|
views/role.py
|
role.py
|
py
| 2,105 |
python
|
en
|
code
| 1 |
github-code
|
50
|
24800009720
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from utils import CSV
import time
from connections import Mysql
from interfaces import Field
############ Step 1
mysqlClient = Mysql()
# utilCsv = CSV()
tableName="planilha_dyego"
dm1Name="dm1_dyego"
dm2Name="dm2_dyego"
whereEmptyName="nome = ''"
whereEmptyEmail="email = ''"
whereEmptyPwd="pwd = ''"
whereEmptyIp="ip = ''"
whereEmptyDate="data = ''"
whereEmptyHour="hora = ''"
csvPath="access.csv"
"""
fields = []
fields.append( Field('id', 'INT NOT NULL AUTO_INCREMENT') )
fields.append( Field('nome', 'VARCHAR(100)') )
fields.append( Field('email', 'VARCHAR(100)') )
fields.append( Field('pwd', 'VARCHAR(30)') )
fields.append( Field('ip', 'VARCHAR(30)') )
fields.append( Field('data', 'VARCHAR(30)') )
fields.append( Field('hora', 'VARCHAR(30)') )
mysqlClient.createTable(tableName=tableName, fields=fields)
mysqlClient.showListDatabaseNames()
dataToInsert = utilCsv.openCsvByName(csvPath)
mysqlClient.insertManyIntoPlanilhaDyego(dataToInsert)
mysqlClient.showListDataByTable(tableName)
"""
############ Step 2
"""
print('Show empty name')
mysqlClient.getByTableAndWhere(tableName, whereEmptyName)
print('Show empty email')
mysqlClient.getByTableAndWhere(tableName, whereEmptyEmail)
print('Show empty pwd')
mysqlClient.getByTableAndWhere(tableName, whereEmptyPwd)
print('Show empty ip')
mysqlClient.getByTableAndWhere(tableName, whereEmptyIp)
print('Show empty date')
mysqlClient.getByTableAndWhere(tableName, whereEmptyDate)
print('Show empty hour')
mysqlClient.getByTableAndWhere(tableName, whereEmptyHour)
mysqlClient.deleteByTableAndWhere(tableName, whereEmptyName)
mysqlClient.deleteByTableAndWhere(tableName, whereEmptyEmail)
mysqlClient.deleteByTableAndWhere(tableName, whereEmptyPwd)
mysqlClient.deleteByTableAndWhere(tableName, whereEmptyIp)
mysqlClient.deleteByTableAndWhere(tableName, whereEmptyDate)
mysqlClient.deleteByTableAndWhere(tableName, whereEmptyHour)
"""
"""
fields = []
fields.append( Field('id', 'INT NOT NULL AUTO_INCREMENT') )
fields.append( Field('nome', 'VARCHAR(100)') )
fields.append( Field('email', 'VARCHAR(100)') )
fields.append( Field('pwd', 'VARCHAR(30)') )
mysqlClient.createTable(tableName=dm1Name, fields=fields)
fields = []
fields.append( Field('id', 'INT NOT NULL AUTO_INCREMENT') )
fields.append( Field('ip', 'VARCHAR(30)') )
fields.append( Field('data', 'VARCHAR(30)') )
fields.append( Field('hora', 'VARCHAR(30)') )
mysqlClient.createTable(tableName=dm2Name, fields=fields)
fields = ['nome', 'email', 'pwd']
mysqlClient.copyTableFields(tableName1=dm1Name, tableName2=tableName, fields=fields)
fields = ['ip', 'data', 'hora']
mysqlClient.copyTableFields(tableName1=dm2Name, tableName2=tableName, fields=fields)
"""
fields = ['ip', 'data']
mysqlClient.showMoreThanOne(tableName, fields)
|
dyegocaldeira/bigdata
|
app-rds.py
|
app-rds.py
|
py
| 2,815 |
python
|
en
|
code
| 0 |
github-code
|
50
|
9875537328
|
import random
class GeneratingRandomness:
def __init__(self):
self.min_num = 100
self.result_string = ''
self.check = ["000", "001", "010", "011", "100", "101", "110", "111"]
self.balance = 1000
def take_input(self):
list_collector = []
while True:
random_string = input('Print a random string containing 0 or 1:\n')
for item in random_string:
if item == '1' or item == '0':
list_collector.append(int(item))
if len(list_collector) < self.min_num:
print(f'Current data length is {len(list_collector)}, {self.min_num - len(list_collector)} symbols left')
continue
else:
self.result_string = ''.join(str(k) for k in list_collector)
print('Final data string:')
print(self.result_string + '\n')
break
def calculate(self):
string_list = [self.result_string[i:i + 4] for i in range(0, len(self.result_string))]
res_dict = {}
for triad_def in self.check:
# set the default value to (0, 0)
res_dict.setdefault(triad_def, (0, 0))
for triad in string_list:
if triad[:3] in self.check:
if len(triad) == 4 and triad.endswith('0'):
result = (res_dict.get(triad[:3])[0] + 1, res_dict.get(triad[:3])[-1])
res_dict.update({triad[:3]: result})
elif len(triad) == 4 and triad.endswith('1'):
result = (res_dict.get(triad[:3])[0], res_dict.get(triad[:3])[-1] + 1)
res_dict.update({triad[:3]: result})
return res_dict
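# Illustrative example: in the data '0100110' the triad '010' is followed once
# by '0' and never by '1', so res_dict['010'] ends up as (1, 0).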
def prediction(self):
print("You have $1000. Every time the system successfully predicts your next press, you lose $1.")
print("Otherwise, you earn $1. Print \"enough\" to leave the game. Let's go!")
while True:
entered_string = input('\nPrint a random string containing 0 or 1:\n\n')
if entered_string and set(entered_string) <= {'0', '1'}:  # accept only strings of 0s and 1s
prediction_string = random.choice(self.check)
for i in range(0, len(entered_string) - 3):
next_num = '0' if self.calculate().get(entered_string[0 + i:3 + i])[0] > \
self.calculate().get(entered_string[0 + i:3 + i])[1] else '1'
prediction_string += next_num[0]
self.calculate_accuracy(prediction_string, entered_string)
elif entered_string == 'enough':
print('Game over!')
break
else:
continue
def calculate_accuracy(self, prediction_string, entered_string):
print(f'prediction:\n{prediction_string}\n')
money = 0
guessed_counter = 0
for item in range(3, len(prediction_string)):
if prediction_string[item] == entered_string[item]:
guessed_counter += 1
money += 1
else:
money -= 1
guess_percentage = round(guessed_counter / (len(prediction_string) - 3) * 100, 2)
self.balance = self.balance - money
print(f'Computer guessed right {guessed_counter} out of {len(prediction_string) - 3} symbols ({guess_percentage} %)')
print(f'Your balance is now ${self.balance}')
return guess_percentage
if __name__ == '__main__':
rand = GeneratingRandomness()
rand.take_input()
rand.prediction()
|
sergo8/Generating_Randomness
|
Generating Randomness/task/predictor/predictor.py
|
predictor.py
|
py
| 3,547 |
python
|
en
|
code
| 0 |
github-code
|
50
|
70173602715
|
import faiss # make faiss available
import numpy as np
import time
def IVFPQMultiGpu(config):
print("IVFPQMultiGpu, ", config)
d = config['dimension'] # dimension
nb = config['db_size'] # database size
nq = config['query_num'] # nb of queries
k = config['top_k']
config_gpus = config['gpus']
ngpus = faiss.get_num_gpus()
print("number of GPUs:", ngpus, ",running on gpus:", config_gpus)
gpus = range(config_gpus)
resources = [faiss.StandardGpuResources() for _ in gpus]
vres = faiss.GpuResourcesVector()
vdev = faiss.IntVector()
for i, res in zip(gpus, resources):
vdev.push_back(i)
vres.push_back(res)
index_list = []
for i in range(config['db_num']):
# Using an IVFPQ index
np.random.seed(i)
xb = np.random.random((nb, d)).astype('float32')
xb[:, 0] += np.arange(nb) / 1000.
nlist = config['nlist']
m = config['sub_quantizers']
code = config['bits_per_code']
# begin_time = time.time()
quantizer = faiss.IndexFlatL2(d) # the other index
index_ivfpq = faiss.IndexIVFPQ(quantizer, d, nlist, m, code)
# IndexIVFPQ uses L2 distance here (the coarse quantizer above is IndexFlatL2)
# build the index
gpu_index_ivfpq = faiss.index_cpu_to_gpu_multiple(
vres, vdev, index_ivfpq)
gpu_index_ivfpq.referenced_objects = resources  # keep all GPU resources alive with the index
assert not gpu_index_ivfpq.is_trained
gpu_index_ivfpq.train(xb) # train the index on the database vectors
assert gpu_index_ivfpq.is_trained
gpu_index_ivfpq.add(xb) # add vectors to the index
print(i, ",size = ", gpu_index_ivfpq.ntotal)
index_list.append(gpu_index_ivfpq)
return index_list
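# Querying the returned indexes (hypothetical follow-up, not in the original
# module): every entry in index_list supports the standard faiss search API.
#   xq = np.random.random((nq, d)).astype('float32')
#   distances, ids = index_list[0].search(xq, k)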
|
egliu/faiss-quick-demo
|
src/gpufaiss/ivfpqmultigpu.py
|
ivfpqmultigpu.py
|
py
| 1,827 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40843949822
|
def is_prime(num):
return primes[num]
def calc_score(num, name):
if name == "daewoong":
enemy = "gyuseong"
else:
enemy = "daewoong"
if not is_prime(num):
if len(maximum_3num[enemy]) < 3:
score[enemy] += 1000
else:
score[enemy] += min(maximum_3num[enemy])
elif num in numbers[name] or num in numbers[enemy]:
score[name] -= 1000
else:
numbers[name].add(num)
if len(maximum_3num[name]) < 3:
maximum_3num[name].append(num)
else:
maximum_3num[name].append(num)
maximum_3num[name].remove(min(maximum_3num[name]))
max_num = 5000000
primes = [True] * max_num
primes[0] = False
primes[1] = False
for i in range(2, int(max_num ** 0.5) + 1):
if not primes[i]:
continue
for j in range(2 * i, max_num, i):
primes[j] = False
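# After the sieve above, primes[n] is True iff n is prime (e.g. primes[2],
# primes[13]), giving O(1) lookups in is_prime for numbers below max_num.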
names = ["daewoong", "gyuseong"]
score = {name: 0 for name in names}
numbers = {name: set() for name in names}
maximum_3num = {name: [] for name in names}
N = int(input())
for i in range(N):
d, g = map(int, input().split())
calc_score(d, names[0])
calc_score(g, names[1])
if score[names[0]] > score[names[1]]:
print("소수의 신 갓대웅")
elif score[names[0]] < score[names[1]]:
print("소수 마스터 갓규성")
else:
print("우열을 가릴 수 없음")
|
hellouz818/AlgorithmStudy
|
김원호/5회차/소수게임.py
|
소수게임.py
|
py
| 1,376 |
python
|
en
|
code
| 1 |
github-code
|
50
|
14832397675
|
rows, columns = [int(x) for x in input().split(', ')]
matrix = []
total_sum = 0
for row_index in range(rows):
matrix.append([int(x) for x in input().split(', ')])
for col_index in range(columns):
total_sum += matrix[row_index][col_index]
print(total_sum)
print(matrix)
|
Pavlina-G/Softuni-Python-Advanced
|
04. Multidimensional lists/Lab/01_2sum_matrix_elements.py
|
01_2sum_matrix_elements.py
|
py
| 296 |
python
|
en
|
code
| 0 |
github-code
|
50
|
11201796104
|
# Python
from __future__ import annotations
from dataclasses import KW_ONLY, dataclass
from dataclasses import field as set_field
from typing import TYPE_CHECKING
# SD-WebUI
from modules import sd_models, sd_vae
# Local
from sd_advanced_grid.utils import clean_name, get_closest_from_list, logger, parse_range_float, parse_range_int
# ################################### Types ################################## #
if TYPE_CHECKING:
from collections.abc import Callable
from modules.processing import StableDiffusionProcessing as SD_Proc
# ################################# Constants ################################ #
SHARED_OPTS = [
"CLIP_stop_at_last_layers",
"code_former_weight",
"face_restoration_model",
"eta_noise_seed_delta",
"sd_vae",
"sd_model_checkpoint",
"uni_pc_order",
"use_scale_latent_for_hires_fix",
]
# ######################### Axis Modifier Interpreter ######################## #
@dataclass
class AxisOption:
label: str
type: type[str | int | float | bool]
_: KW_ONLY
field: str | None = None
min: float = 0.0
max: float = 1.0
choices: Callable[..., list[str]] | None = None
toggles: str | None = None
cost: float = 0.2
_valid: list[bool] = set_field(init=False, default_factory=list)
_values: list[str] | list[int] | list[float] | list[bool] = set_field(init=False, default_factory=list)
_index: int = set_field(init=False, default=0)
@staticmethod
def apply_to(field: str, value: AxisOption.type, proc: SD_Proc):
if field in SHARED_OPTS:
proc.override_settings[field] = value
else:
setattr(proc, field, value)
def _apply(self, proc: SD_Proc):
value = self._values[self._index]
if self.type is None:
return
if self.toggles is None or value != "Default":
AxisOption.apply_to(self.id, value, proc)
if self.toggles:
if self.choices:
AxisOption.apply_to(self.toggles, value != "None", proc)
else:
AxisOption.apply_to(self.toggles, True, proc)
def apply(self, proc: SD_Proc):
"""tranform the value on the Processing job with the current selected value"""
if self._valid[self._index] is False:
raise RuntimeError(f"Value not valid for {self.label}: {self.value}")
try:
self._apply(proc)
except Exception as exc:
raise RuntimeError(f"{self.value} could not be applied on {self.label}") from exc
def next(self):
if self._index + 1 < self.length:
self._index += 1
return True
self._index = 0
return False
@property
def id(self): # pylint: disable=invalid-name
return self.field if self.field is not None else clean_name(self.label)
@property
def length(self):
return len(self._values)
@property
def values(self):
"""list of possible value"""
return self._values.copy()
@property
def value(self):
"""value to be applied"""
return self._values[self._index]
@property
def is_valid(self):
if not self._valid:
return None
return all(self._valid)
@property
def index(self):
return self._index
def dict(self):
return {"label": self.label, "param": self.id, "values": self.values}
def set(self, values: str = "") -> AxisOption:
"""format input from a string to a list of value"""
has_double_pipe = "||" in values
value_list = [val.strip() for val in values.split("||" if has_double_pipe else ",") if val.strip()]
if self.type == int:
self._values = parse_range_int(value_list)
elif self.type == float:
self._values = parse_range_float(value_list)
else:
self._values = [value for value in map(self._format_value, value_list) if value not in {"", None}] # type: ignore
return self
def unset(self):
self._index = 0
self._values = []
self._valid = []
def _format_value(self, value: str) -> AxisOption.type:
cast_value = None
if self.type == int:
cast_value = int(value)
elif self.type == float:
cast_value = round(float(value), 8)
elif self.type == bool:
cast_value = str(value).lower()
if cast_value in {"true", "yes", "1", "on"}:
cast_value = True
elif cast_value in {"false", "no", "0", "off"}:
cast_value = False
elif self.type == str and self.choices is not None:
valid_list = self.choices()
cast_value = get_closest_from_list(value, valid_list)
else:
cast_value = value
return cast_value
def validate(self, value: AxisOption.type) -> None:
"""raise an error if the data type is incorrect"""
same_type = isinstance(value, self.type)
if self.type in (int, float):
if not same_type:
raise RuntimeError(f"Must be a {self.type} number")
if self.min is not None and value < self.min: # type: ignore
raise RuntimeError(f"Must be at least {self.min}")
if self.max is not None and value > self.max: # type: ignore
raise RuntimeError(f"Must not exceed {self.max}")
if self.type == bool and not same_type:
raise RuntimeError("Must be either 'True' or 'False'")
if self.type == str and self.choices is not None and (not same_type or not value):
raise RuntimeError("Not found in the list")
if not same_type:
raise RuntimeError("Must be a valid type")
def validate_all(self, quiet: bool = True, **_):
def validation(value):
try:
self.validate(value)
except RuntimeError as err:
return f"'{err} for: {value}'"
return None
result = [validation(value) for value in self._values]
if any(result):
errors = [err for err in result if err]
if not quiet:
raise RuntimeError(f"Invalid parameters in {self.label}: {errors}")
logger.warn(f"Invalid parameters in {self.label}", errors)
self._valid = [err is None for err in result]
@dataclass
class AxisNothing(AxisOption):
type: None = None
def _apply(self, _):
return
@property
def is_valid(self):
return True
@dataclass
class AxisModel(AxisOption):
_: KW_ONLY
cost: float = 1.0 # change of checkpoints is too heavy, do it less often
def validate(self, value: str):
info = sd_models.get_closet_checkpoint_match(value)
if info is None:
raise RuntimeError("Unknown checkpoint")
@dataclass
class AxisVae(AxisOption):
_: KW_ONLY
cost: float = 0.7
def validate(self, value: str):
if value in {"None", "Automatic"}:
return
if sd_vae.vae_dict.get(value, None) is None:
raise RuntimeError("Unknown VAE")
@dataclass
class AxisReplace(AxisOption):
_: KW_ONLY
cost: float = 0.5 # to allow prompt to be replaced before string manipulation
_values: list[str] = set_field(init=False, default_factory=list)
__tag: str = set_field(init=False, default="")
def _apply(self, proc):
"""tranform the value on the Processing job"""
value = str(self._values[self._index])
proc.prompt = proc.prompt.replace(self.__tag, value)
proc.negative_prompt = proc.negative_prompt.replace(self.__tag, value)
def validate_all(self, quiet: bool = True, **kwargs):
proc = kwargs.pop("proc", None)
if proc is None:
return
error = ""
if not self.__tag:
error = "Values not set or invalid format"
elif self.__tag not in proc.prompt and self.__tag not in proc.negative_prompt:
error = f"Tag '{self.__tag}' not found in all prompts"
if error:
if quiet:
logger.warn(error)
else:
raise RuntimeError(error)
else:
self._valid = [True] * self.length
def set(self, values: str = "") -> AxisOption:
"""
Prompt replace can handle different formats such as:
- 'one, two, three' => ['one=one', 'one=two', 'one=three']
- 'TAG=one, two, three' => ['TAG=one', 'TAG=two', 'TAG=three']
- 'TAG=one, TAG=two, TAG=three' => ['TAG=one', 'TAG=two', 'TAG=three']
- 'TAG=one || TAG=two, three || TAG=four' => ['TAG=one', 'TAG=two, three', 'TAG=four']
"""
has_double_pipe = "||" in values
value_list = [val.strip() for val in values.split("||" if has_double_pipe else ",")]
for value_pair in value_list:
value = [string.strip() for string in value_pair.split("=", maxsplit=1)]
if len(value) == 1 and value[0]:
tag = self.__tag or value[0]
self._values.append(value[0])
elif value[0]:
tag = self.__tag or value[0]
self._values.append(value[1])
else:
continue
self.__tag = tag
self.label = self.label.replace("TAG", self.__tag)
return self
def unset(self):
super().unset()
self.label = self.label.replace(self.__tag, "TAG")
self.__tag = ""
|
micky2be/a1111-sd-advanced-grid
|
sd_advanced_grid/grid_settings.py
|
grid_settings.py
|
py
| 9,531 |
python
|
en
|
code
| 1 |
github-code
|
50
|
26268082638
|
import os
import re
from pathlib import Path
import openai
import pandas
import numpy as np
import tiktoken
EMBEDDING_MODEL = "text-embedding-ada-002"
EMBEDDING_CTX_LENGTH = 8191
EMBEDDING_ENCODING = "cl100k_base"
MAX_EMBEDDINGS = 1536
MAX_TOKENS = 1600
GPT_MODEL = "gpt-3.5-turbo"
def get_embedding(text, model=EMBEDDING_MODEL):
return openai.Embedding.create(input=text, model=model)["data"][0]["embedding"]
def list_document_files():
    # return all markdown files under the ./docs folder
folder_path = Path("./docs")
md_files = folder_path.glob("*.md")
files = [f for f in md_files]
return files
def parse(filepath):
with open(filepath) as f:
raw = f.read()
meta = {"filepath": filepath}
# parse metadata
if raw.startswith("---"):
raw_header, body = raw.split("---", 2)[1:]
for raw_line in raw_header.split("\n"):
line = raw_line.strip()
if ":" in line:
key, val = line.split(":", 1)
meta[key.strip()] = val.strip(" \"'")
else:
body = raw
title = meta["title"] if "title" in meta else meta["filename"]
body = f"# {title}\n{body}"
sections = re.findall("[#]{1,4} .*\n", body)
split_txt = "-=-=-=-=-="
# TODO: ignore "Next Step"
for section in sections:
body = body.replace(section, split_txt)
# TODO: strip `{: header }`
contents = [x.strip() for x in body.split(split_txt)]
headers = [x.strip("# \n") for x in sections]
sections_tuple = zip(headers, contents)
# skip short sections
sections_tuple = [(x, y) for x, y in sections_tuple if len(y.strip()) > 30]
return meta, sections_tuple
def get_document_embeddings(files):
embeddings = []
for f in files:
_, section_tuple = parse(f)
for header, section in section_tuple:
print("calculating embeddings:", str(f), header)
embeddings.append(
{
"title": str(f),
"header": header,
"section": section,
"emb": get_embedding(section),
}
)
return embeddings
def save_embeddings_to_csv(embeddings):
cols = ("title", "header", "section") + tuple(range(MAX_EMBEDDINGS))
rows = []
for emb in embeddings:
# print("processing csv:", emb["title"], emb["header"])
new_row = [emb["title"], emb["header"], emb["section"]]
for i in range(MAX_EMBEDDINGS):
new_row.append(emb["emb"][i])
rows.append(new_row)
export_df = pandas.DataFrame(rows, columns=cols)
export_df.to_csv("embeddings.csv", index=False)
def cal_embeddings():
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
raise Exception("OPENAI_API_KEY is not set")
files = list_document_files()
embeddings = get_document_embeddings(files)
save_embeddings_to_csv(embeddings)
def vector_projection(a, b):
# calculate similarity of two vectors
return np.dot(np.array(a), np.array(b))
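# Note: text-embedding-ada-002 vectors are normalized to unit length, so the
# dot product above coincides with cosine similarity. Minimal sketch:
#   vector_projection([1.0, 0.0], [0.8, 0.6])  # -> 0.8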
def convert_embeddings_from_str(emb):
embeddings = []
for i in range(MAX_EMBEDDINGS):
embeddings.append(float(emb[str(i)]))
return embeddings
def get_relevant_sections(input_emb, document_emb):
distance = []
for index, row in document_emb.iterrows():
distance.append(
(vector_projection(input_emb, convert_embeddings_from_str(row)), index)
)
# return the top 10 most relevant sections
rows_index = [i[1] for i in sorted(distance, reverse=True)[:10]]
relevant_sections = document_emb.loc[rows_index]
return [s["section"] for _, s in relevant_sections.iterrows()]
def get_all_embeddings_from_csv():
embeddings = pandas.read_csv("embeddings.csv")
return embeddings
def num_tokens(text):
encoding = tiktoken.encoding_for_model(GPT_MODEL)
return len(encoding.encode(text))
def construct_context(sections):
# Ensure context token length < max tokens
context = sections[0]
length = num_tokens(context)
for section in sections[1:]:
section_len = num_tokens(section)
if length + section_len > MAX_TOKENS:
break
context += section
length += section_len
return context
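# Sketch of the packing behavior: the first section is always kept, then
# further sections are appended greedily until adding one would push the total
# past MAX_TOKENS (1600), at which point packing stops.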
def request(prompt, context=""):
# Send request to OpenAI API
print("Asking ChatGPT...")
messages = [
{
"role": "system",
"content": "You're a CircleCI doc assistant. \
Answer the question based on the context provided.",
},
{"role": "assistant", "content": context},
{"role": "user", "content": prompt},
]
chat_completion = openai.ChatCompletion.create(model=GPT_MODEL, messages=messages)
print(chat_completion.choices[0].message.content)
print("\n")
def start_chatting():
while True:
user_input = input("Please enter your prompt: ")
if user_input == "exit":
break
if not user_input:
continue
input_emb = get_embedding(user_input)
# TODO: cache document embeddings
document_embeddings = get_all_embeddings_from_csv()
relevant_sections = get_relevant_sections(input_emb, document_embeddings)
context = construct_context(relevant_sections)
request(user_input, context)
if __name__ == "__main__":
# cal_embeddings()
start_chatting()
|
kpister/prompt-linter
|
data/scraping/repos/liamchzh~circleci-docs-assistant/doc-assistant.py
|
doc-assistant.py
|
py
| 5,549 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72147729114
|
from django.urls import path
# from . import views
from das_admin import views
urlpatterns = [
path('index',views.index,name='index'),
path('',views.login,name='login'),
path('register',views.register,name='register'),
path('profile',views.profile,name='profile'),
path('patient_list',views.patient_list,name='patient_list'),
path('doctor_list',views.doctor_list,name='doctor_list'),
path('appointment_list',views.appointment_list,name='appointment_list'),
path('doctor_profile',views.doctor_profile,name='doctor_profile'),
path('doctor_deactive/<int:id>',views.doctor_deactive,name='doctor_deactive'),
path('doctor_active/<int:id>',views.doctor_active,name='doctor_active'),
path('blank_page',views.blank_page,name='blank_page'),
path('components',views.components,name='components'),
path('data_tables',views.data_tables,name='data_tables'),
path('error_404',views.error_404,name='error_404'),
path('error_500',views.error_500,name='error_500'),
path('forgot_password',views.forgot_password,name='forgot_password'),
path('form_basic_inputs',views.form_basic_inputs,name='form_basic_inputs'),
path('form_horizontal',views.form_horizontal,name='form_horizontal'),
path('form_input_groups',views.form_input_groups,name='form_input_groups'),
path('form_mask',views.form_mask,name='form_mask'),
path('form_validation',views.form_validation,name='form_validation'),
path('form_vertical',views.form_vertical,name='form_vertical'),
path('invoice_report',views.invoice_report,name='invoice_report'),
path('invoice',views.invoice,name='invoice'),
path('lock_screen',views.lock_screen,name='lock_screen'),
path('reviews',views.reviews,name='reviews'),
path('settings',views.settings,name='settings'),
path('tables_basic',views.tables_basic,name='tables_basic'),
path('transactions_list',views.transactions_list,name='transactions_list'),
]
|
mayuri0610/python
|
PROJECT/Self Project/Dr.Appoinment System/CORE/das_admin/urls.py
|
urls.py
|
py
| 1,952 |
python
|
en
|
code
| 0 |
github-code
|
50
|
10678997325
|
#!/usr/bin/env python3
import random
import sys
import common
LENGTH = 4
COLORS = ['R', 'V', 'B', 'J', 'N', 'M', 'O', 'G']
def choices(e, n):
"""Renvoie une liste composée de n éléments tirés de e avec remise
On pourrait utiliser random.choices, mais cette fonction n'est pas
disponible dans les versions plus anciennes de Python
"""
return [random.choice(e) for i in range(n)]
def evaluation(attempt, solution):
    """
    Compare the attempt proposed by the codebreaker with the codemaker's
    reference solution.

    Arguments: the codebreaker's attempt and the codemaker's solution.
    Returns a pair of two integers: the number of correct colors in the right
    position, and the number of correct colors in the wrong position.
    """
    # first make sure the proposed combination has a valid length
    if len(attempt) != len(solution):
        sys.exit("Error: the two combinations do not have the same length")
    sol = []
    # turn the solution into a list so each character can be modified independently
    for j in range(len(solution)):
        sol.append(solution[j])
    # initialize the number of well-placed pegs to 0
    pbp = 0
    # initialize the number of well- or mis-placed pegs to 0
    pbmp = 0
    # first look for the well-placed pegs by comparing the i-th element of the solution with the i-th element of the attempt
    for i in range(len(attempt)):
        if attempt[i] == sol[i]:
            pbp += 1  # if the two are the same, increment pbp
        # to find the misplaced pegs, scan and compare every element of the solution for each element of the attempt
        for k in range(len(sol)):
            # if one of the elements of the attempt is the same as the k-th element of the solution
            if attempt[i] == sol[k]:
                # replace that character with an empty string: if it appears several times in the attempt it must only be counted once
                sol[k] = ''
                pbmp += 1  # then increment pbmp
                break  # move straight on to the next iteration since a match was found
    pmp = pbmp - pbp  # finally, subtract the well-placed pegs to keep only the misplaced ones
    return (pbp, pmp)
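# Worked example: evaluation("RRVB", "RVBB") returns (2, 1).
# R (position 0) and B (position 3) are well placed; the V is present but
# misplaced; the second R has no remaining match in the solution.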
def donner_possibles(attempt, evaluation):
    """
    Arguments: the attempt proposed by the codebreaker, then the evaluation
    returned by the codemaker for that attempt.
    Returns the set of solutions still possible after the first evaluation.
    """
    import itertools
    # make combi_possibles a global variable so we do not start over from scratch every time the codebreaker makes an attempt
    global combi_possibles
    # fill combi_possibles with every combination of 4 colors out of the 8, order being significant
    produit = itertools.product(COLORS, repeat=LENGTH)
    combi_possibles = set([chaine(q) for q in produit])
    return maj_possibles(combi_possibles, attempt, evaluation)
def chaine(q):
    """
    Turn a tuple of 4 letters into a single string of 4 characters.
    The argument is a quadruplet such as ('J','B','B','G') and the function
    returns a string such as 'JBBG'.
    """
    combinaison = ''  # initialize the variable that will hold our character string
    # for each element of the argument
    for i in q:
        combinaison += i  # append it to the character string
    return combinaison
def maj_possibles(combi_possibles, attempt, evaluation):
    """
    Takes as arguments the set of still-possible combinations, the last
    attempt made and the evaluation associated with it.
    Returns the set of still-possible combinations updated to take the new
    attempt and its evaluation into account.
    """
    poss = combi_possibles.copy()
    # for each element of combi_possibles
    for i in poss:
        # look at what evaluating the i-th element of combi_possibles against the last attempt returns
        # if that evaluation differs from the one returned by the codemaker
        if common.evaluation(attempt, i) != evaluation:
            # remove the element from combi_possibles
            combi_possibles.remove(i)
    return combi_possibles
|
Margob29/mastermind
|
common.py
|
common.py
|
py
| 4,405 |
python
|
fr
|
code
| 0 |
github-code
|
50
|
8943094478
|
# program to execute all dependencies of a task before executing the task itself
class Task:
def __init__(self, name, dependancies = None):
self.name = name
self.dependancies = dependancies
self.state = False
def execute(self):
if self.dependancies is not None:
for task in self.dependancies:
if task.state is False:
task.execute()
print('executing : ' + self.name)
self.state = True
def main():
task_e = Task('E')
task_d = Task('D', [task_e])
task_a = Task('A')
task_b = Task('B', [task_a])
task_c = Task('C', [task_b, task_a, task_d])
task_f = Task('F', [task_c])
task_f.execute()
main()
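# Expected output of main(): dependencies are executed depth-first before each
# task, and already-completed tasks are skipped, so the order is:
#   executing : A
#   executing : B
#   executing : E
#   executing : D
#   executing : C
#   executing : F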
|
ch374n/python-programming
|
recursion/dependancies.py
|
dependancies.py
|
py
| 629 |
python
|
en
|
code
| 0 |
github-code
|
50
|
41426028197
|
from math import sqrt
import numpy as np
def proj_length(v, v_on):
on_norm = np.linalg.norm(v_on)
v_len = np.linalg.norm(v)
projection_len = 0
rejection_len = 0
if on_norm > 0.01:
projection_len = np.dot(v, v_on) / on_norm
if v_len > abs(projection_len):
rejection_len = sqrt(v_len ** 2 - projection_len ** 2)
return projection_len, rejection_len
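# Worked example: proj_length(np.array([3.0, 4.0]), np.array([1.0, 0.0]))
# returns (3.0, 4.0): the projection onto the x-axis has length 3, and the
# rejection is sqrt(5**2 - 3**2) = 4.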
def find_foci(arr_pts):
# not necessary for foci calc, just additional animation
_pts_search_animations = []
# shuffle to improve main axis search, can be optimized
pts = np.copy(arr_pts)
np.random.shuffle(pts)
pts_len = len(pts)
pt_average = np.sum(pts, axis=0) / pts_len
vec_major = pt_average * 0
minor_max, major_max = 0, 0
# may be improved with overlapped pass,
# when max calcs are started after delay when axis is less random
for pt_cur in pts:
vec_cur = pt_cur - pt_average
proj_len, rej_len = proj_length(vec_cur, vec_major)
if proj_len < 0:
vec_cur = -vec_cur
vec_major += (vec_cur - vec_major) / pts_len
major_max = max(major_max, abs(proj_len))
minor_max = max(minor_max, rej_len)
_pts_search_animations += [[pt_cur, np.copy(vec_major)]]
# if both very close, i.e. cloud is sphere, may happen
if major_max < minor_max:
major_max, minor_max = minor_max, major_max
vec_major_unit = vec_major / np.linalg.norm(vec_major)
vec_foci = vec_major_unit * sqrt(major_max ** 2 - minor_max ** 2)
foci_1 = pt_average + vec_foci
foci_2 = pt_average - vec_foci
return foci_1, foci_2, _pts_search_animations
def find_ellipsoid(arr_pts):
foci_1, foci_2, _pts_search_animations = find_foci(arr_pts)
string_pro_calc = 0
for pt_cur in arr_pts:
cur_pt_radius = np.linalg.norm(pt_cur - foci_1) + np.linalg.norm(pt_cur - foci_2)
string_pro_calc = max(string_pro_calc, cur_pt_radius)
return foci_1, foci_2, string_pro_calc, _pts_search_animations
|
halt9k/bounded-ellipsoid
|
src/bounded_ellipsoid_alg.py
|
bounded_ellipsoid_alg.py
|
py
| 2,041 |
python
|
en
|
code
| 2 |
github-code
|
50
|
16106954536
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#pip install orjson
#pip install tqdm
#pip install scipy
import json
import re
import numpy as np
#from tqdm import notebook
import collections
from tqdm import tqdm
from scipy import sparse
# In[2]:
#"data/stopword.list"
def get_stop_words(path):
stop_word = set()
list_file = open(path, 'r').read().split("\n")
for line in list_file:
stop_word.add(line)
return stop_word
# In[3]:
def tokenize(text, stop_word):
text_tokens = []
    text = re.sub(r'[^\s\w]|\w*\d\w*', '', text).split()
#reference : https://greeksharifa.github.io/%EC%A0%95%EA%B7%9C%ED%91%9C%ED%98%84%EC%8B%9D(re)/2018/08/04/regex-usage-05-intermediate/
for token in text:
if token not in stop_word:
text_tokens.append(token.strip())
return text_tokens
# In[4]:
#data_path='data/yelp_reviews_train.json'
def extract(data_path):
tmp_token=[]
tmp_star = []
tmp_rating = []
stop_word=get_stop_words("data/stopword.list")
lines = open(data_path, 'r').read().split("\n")
for line in tqdm(lines):
if line == "":
continue
review = json.loads(line)
str_token = tokenize(review['text'].lower(),stop_word)
tmp_token.append(str_token)
np_star = np.zeros(5)
rating = int(review['stars'])
np_star[rating - 1] = 1
tmp_star.append(np_star)
tmp_rating.append(rating)
return tmp_token,tmp_star,tmp_rating
# In[5]:
#data_path='data/yelp_reviews_train.json'
def dev_extract(data_path):
tmp_token=[]
stop_word=get_stop_words("data/stopword.list")
lines = open(data_path, 'r').read().split("\n")
for line in tqdm(lines):
if line == "":
continue
review = json.loads(line)
str_token = tokenize(review['text'].lower(),stop_word)
#print(str_token)
tmp_token.append(str_token)
return tmp_token
# In[6]:
token,star,rating=extract('data/yelp_reviews_train.json')
# In[7]:
'''score=[0,0,0,0,0]
for i in star:
score+=i
print("score : ",score)
print("ratio :", score/sum(score))'''
# In[8]:
train_token=token[:int(len(token)*0.8)]
train_star=star[:int(len(token)*0.8)]
train_rating=rating[:int(len(token)*0.8)]
test_token=token[int(len(token)*0.8):]
test_star=star[int(len(token)*0.8):]
test_rating=rating[int(len(token)*0.8):]
# In[9]:
len(train_rating)
# In[10]:
def CTF_dict_new(token,CTF_vocab):
dic={}
C=collections.Counter(token)
for i in set(token):
if i in CTF_vocab:
dic[CTF_vocab.index(i)]=C[i]
return dic
# In[11]:
def DF_dict_new(token,DF_vocab):
dic={}
C=collections.Counter(token)
for i in set(token):
if i in DF_vocab:
dic[DF_vocab.index(i)]=C[i]
return dic
# In[12]:
def get_txt(path):
tmp=[]
f=open(path,'r')
while True:
line = f.readline()
if not line: break
tmp.append(line.rstrip('\n'))
f.close()
return tmp
# In[13]:
def CTF(token):
vocab_freq={}
for i in tqdm(range(len(token))):
tokens=token[i]
for w in tokens:
try:
vocab_freq[w]+=1
except:
vocab_freq[w]=1
sorted_v = sorted(vocab_freq.items(), key=lambda kv: kv[1],reverse=True)
vocab_freq = collections.OrderedDict(sorted_v)
CTF_vocab=[x for x in vocab_freq]
CTF_vocab=CTF_vocab[:2000]
return CTF_vocab
# In[14]:
def DF(token):
DF = {}
for i in range(len(token)):
tokens = token[i]
for w in tokens:
try:
DF[w].add(i)
except:
DF[w] = {i}
for i in DF:
DF[i]=len(DF[i])
sorted_df = sorted(DF.items(), key=lambda kv: kv[1],reverse=True)
DF_freq = collections.OrderedDict(sorted_df)
DF_vocab=[x for x in DF_freq]
DF_vocab=DF_vocab[:2000]
return DF_vocab
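# Sketch of the difference between the two vocabularies: CTF keeps the 2000
# terms with the highest total occurrence count across all reviews, while DF
# keeps the 2000 terms that appear in the most distinct reviews. For the token
# lists [["a", "a", "a"], ["b"], ["b"]], CTF ranks "a" first (3 occurrences)
# while DF ranks "b" first (2 documents vs 1).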
# In[15]:
def get_CTF_matrix(token,CTF_vocab):
import time
start=time.time()
row_ctf=[]
col_ctf=[]
data_ctf=[]
n=0
for i in tqdm(token):
dic=CTF_dict_new(i,CTF_vocab)
row_ctf.extend([n]*len(dic))
col_ctf.extend(dic.keys())
data_ctf.extend(dic.values())
del dic
n+=1
print("CTF_MATRIX DONE : ", time.time()-start)
CTF_mtx=sparse.csr_matrix((data_ctf, (row_ctf, col_ctf)), shape=(len(token), 2000))
return CTF_mtx
# In[16]:
def get_DF_matrix(token,DF_vocab):
import time
start=time.time()
row_df=[]
col_df=[]
data_df=[]
n=0
for i in tqdm(token):
dic=DF_dict_new(i,DF_vocab)
row_df.extend([n]*len(dic))
col_df.extend(dic.keys())
data_df.extend(dic.values())
del dic
n+=1
print("DF_MATRIX DONE : ",time.time()-start)
DF_mtx=sparse.csr_matrix((data_df, (row_df, col_df)), shape=(len(token), 2000))
return DF_mtx
# In[17]:
import random
def logistic_regression(train_mtx, list_star,rating,test_mtx,test_rating):
label_mtx = np.array(list_star)
# use gradient ascent to update model
alpha = 0.003
lamda = 0.5
steps = 100000
#batch_size=5000
model_gd = gradient_ascent(train_mtx, label_mtx, alpha, lamda, steps,rating,test_mtx,test_rating)
return model_gd
def gradient_ascent(train_mtx, label_mtx, alpha, lamda, steps,rating,test_mtx,test_rating):
import math
rmse_list=[]
# initialize matrix w
test_rating=np.array(test_rating,dtype=float)
model_mtx = np.zeros((5, 2000))
row_size = train_mtx.shape[0]
for step in range(0, steps):
alpha *= 1 / (1 + alpha * lamda * step)
pick = random.sample(range(row_size), 8000)
sgd_mtx = train_mtx[pick, :]
sgd_label = label_mtx[pick, :]
e_wx = np.exp(sgd_mtx * model_mtx.transpose())
e_sum = np.sum(e_wx, axis=1)
e_div = (e_wx.transpose() / e_sum).transpose()
sgd_sub = np.subtract(sgd_label, e_div)
gradient = alpha * (sgd_sub.transpose() * sgd_mtx - lamda * model_mtx)
model_mtx += gradient
exp_wx = np.exp(model_mtx * test_mtx.transpose())
cond_prob = exp_wx / np.sum(exp_wx, axis=0)
label = np.array([[1], [2], [3], [4], [5]])
soft_pred = np.sum(label * cond_prob, axis=0)
rmse = math.sqrt(np.sum(np.square(soft_pred - test_rating)/soft_pred.shape[0]))
rmse_list.append(float(rmse))
rmse_list=rmse_list[-20:]
#print(np.array(rmse_list))
#print(step,rmse)
print(rmse)
if step > 20:
if np.array(rmse_list).max()- np.array(rmse_list).min()<0.001:
print('converge')
print(step)
print(rmse_list)
break
#if np.sqrt(np.sum(np.square(gradient))) < 0.00001:
#break
return model_mtx
# In[18]:
def validate_model(model_mtx, eval_mtx, eval_label):
import math
row_size = eval_mtx.shape[0]
exp_wx = np.exp(model_mtx * eval_mtx.transpose())
cond_prob = exp_wx / np.sum(exp_wx, axis=0)
hard_pred = np.argmax(cond_prob, axis=0) + 1
correct = np.sum(hard_pred == eval_label)
acc = (correct + 0.0) / row_size
label = np.array([[1], [2], [3], [4], [5]])
soft_pred = np.sum(label * cond_prob, axis=0)
rmse = math.sqrt(np.sum(np.square(soft_pred - eval_label)/soft_pred.shape[0]))
return print('ACC :', acc, ' RMSE :', rmse)
# In[19]:
def write(model_mtx, test_mtx,save_path):
row_size = test_mtx.shape[0]
f = open(save_path, 'w')
label = np.array([1, 2, 3, 4, 5])
for line in range(row_size):
exp_wx = np.exp(model_mtx * test_mtx[line, :].transpose())
cond_prob = exp_wx / np.sum(exp_wx)
hard_pred = np.argmax(cond_prob) + 1
soft_pred = np.sum(label * cond_prob.transpose())
f.write(str(hard_pred) + " " + str(soft_pred) + "\n")
# In[20]:
def write_no_lb(preds,save_path):
f = open(save_path, 'w')
for line in preds:
f.write(str(line)+" "+"0"+"\n")
# # DEV & TEST
# In[21]:
dev_token=dev_extract('data/yelp_reviews_dev.json')
# In[22]:
'''ttest_token=dev_extract('data/yelp_reviews_test.json')'''
# # DF
# In[23]:
#train_token, test_token
#train_rating test_rating
#train_star test_star
# In[24]:
DF_vocab=DF(train_token)
# In[25]:
DF_train_mtx=get_DF_matrix(train_token,DF_vocab)
DF_test_mtx=get_DF_matrix(test_token,DF_vocab)
# In[26]:
pred_df_mtx=logistic_regression(DF_train_mtx,train_star,train_rating,DF_test_mtx,test_rating)
# In[27]:
validate_model(pred_df_mtx,DF_train_mtx,train_rating)
# In[28]:
#predict(pred_df_mtx,DF_mtx,'results/train_df.txt')
# In[29]:
'''dev_DF_mtx=get_DF_matrix(dev_token,DF_vocab)'''
# In[30]:
def write(model_mtx, test_mtx,save_path):
row_size = test_mtx.shape[0]
f = open(save_path, 'w')
label = np.array([1, 2, 3, 4, 5])
for line in range(row_size):
exp_wx = np.exp(model_mtx * test_mtx[line, :].transpose())
cond_prob = exp_wx / np.sum(exp_wx)
hard_pred = np.argmax(cond_prob) + 1
soft_pred = np.sum(label * cond_prob.transpose())
f.write(str(hard_pred) + " " + str(soft_pred) + "\n")
# In[31]:
#test_DF_mtx=get_matrix(test_token,"DF")
# In[32]:
'''write(pred_df_mtx,dev_DF_mtx,'lr_dev_df.txt')'''
# In[33]:
#write(pred_df_mtx,test_DF_mtx,'results/lr_test_df.txt')
# In[34]:
from sklearn.svm import LinearSVC
import time
# initialise the SVM classifier
DF_classifier = LinearSVC(dual=False)
# train the classifier
start = time.time()
DF_classifier.fit(DF_train_mtx, train_rating)
print(time.time()-start)
# In[35]:
df_svm_preds = DF_classifier.predict(DF_train_mtx)
'''write_no_lb(df_svm_preds,'results/svm_train_df.txt')'''
# In[36]:
'''dev_df_svm_preds = DF_classifier.predict(dev_DF_mtx)
write_no_lb(dev_df_svm_preds,'svm_dev_df.txt')'''
# In[37]:
'''test_df_svm_preds = DF_classifier.predict(test_DF_mtx)
write_no_lb(test_df_svm_preds,'results/svm_test_df.txt')'''
# In[38]:
correct = np.sum(df_svm_preds == train_rating)
print("DF-SVM-ACC :",(correct + 0.0) / len(df_svm_preds))
# # CTF
# In[39]:
CTF_vocab=CTF(train_token)
# In[40]:
CTF_train_mtx=get_CTF_matrix(train_token,CTF_vocab)
CTF_test_mtx=get_CTF_matrix(test_token,CTF_vocab)
# In[41]:
pred_ctf_mtx=logistic_regression(CTF_train_mtx,train_star,train_rating,CTF_test_mtx,test_rating)
# In[42]:
validate_model(pred_ctf_mtx,CTF_train_mtx,train_rating)
# In[43]:
dev_CTF_mtx=get_CTF_matrix(dev_token,DF_vocab)
# In[44]:
#write(pred_ctf_mtx,dev_CTF_mtx,'lr_dev_ctf.txt')
# In[ ]:
# In[45]:
from sklearn.svm import LinearSVC
import time
# initialise the SVM classifier
CTF_classifier = LinearSVC(dual=False)
# train the classifier
start = time.time()
CTF_classifier.fit(CTF_train_mtx, train_rating)
print(time.time()-start)
# In[46]:
ctf_svm_preds = CTF_classifier.predict(CTF_train_mtx)
# In[47]:
correct = np.sum(ctf_svm_preds == train_rating)
print("DF-SVM-ACC :",(correct + 0.0) / len(ctf_svm_preds))
# In[48]:
'''dev_ctf_svm_preds = CTF_classifier.predict(dev_CTF_mtx)
write_no_lb(dev_ctf_svm_preds,'svm_dev_ctf.txt')'''
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
814yk/Yelp-Rating-Prediction
|
CTF_DF.py
|
CTF_DF.py
|
py
| 11,329 |
python
|
en
|
code
| 1 |
github-code
|
50
|
38133489680
|
import tkinter as tk
from tkinter import *
win = tk.Tk()
win.geometry('')
win.title('.:.Calculator.:.')
win.geometry('400x600+550+100')
win.resizable(0, 0)
screentxt = ''
def my_btndot():
global screentxt
screentxt += str('.')
lbl.config(text=screentxt)
def my_btn0():
global screentxt
screentxt += str(0)
lbl.config(text=screentxt)
def my_btn1():
global screentxt
screentxt += str(1)
lbl.config(text=screentxt)
def my_btn2():
global screentxt
screentxt += str(2)
lbl.config(text=screentxt)
def my_btn3():
global screentxt
screentxt += str(3)
lbl.config(text=screentxt)
def my_btn4():
global screentxt
screentxt += str(4)
lbl.config(text=screentxt)
def my_btn5():
global screentxt
screentxt += str(5)
lbl.config(text=screentxt)
def my_btn6():
global screentxt
screentxt += str(6)
lbl.config(text=screentxt)
def my_btn7():
global screentxt
screentxt += str(7)
lbl.config(text=screentxt)
def my_btn8():
global screentxt
screentxt += str(8)
lbl.config(text=screentxt)
def my_btn9():
global screentxt
screentxt += str(9)
lbl.config(text=screentxt)
def my_btnclear():
global lbl
global screentxt
screentxt = ''
lbl.config(text=screentxt)
def my_btnadd():
global screentxt
screentxt += str('+')
lbl.config(text=screentxt)
def my_btnminus():
global screentxt
screentxt += str('-')
lbl.config(text=screentxt)
def my_btndivide():
global screentxt
screentxt += str('/')
lbl.config(text=screentxt)
def my_btnmulti():
global screentxt
screentxt += str('*')
lbl.config(text=screentxt)
def my_btnback():
global screentxt
if len(screentxt)>0:
txtlist = list(screentxt)
txtlist.remove(txtlist[-1])
screentxt = ''.join(txtlist)
lbl.config(text=screentxt)
def my_btnEqual():
global screentxt
screentxt = str(eval(screentxt))
lbl.config(text=screentxt)
lbl = tk.Label(win, bg='#B22222', fg='#FFD700', font=10)
lbl.place(height=100, width=400, x=0, y=0)
btn0 = tk.Button(win, text='0', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn0)
btn0.place(height=100, width=200, x=0, y=500)
btn1 = tk.Button(win, text='1', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn1)
btn1.place(height=100, width=100, x=0, y=400)
btn2 = tk.Button(win, text='2', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn2)
btn2.place(height=100, width=100, x=100, y=400)
btn3 = tk.Button(win, text='3', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn3)
btn3.place(height=100, width=100, x=200, y=400)
btn4 = tk.Button(win, text='4', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn4)
btn4.place(height=100, width=100, x=0, y=300)
btn5 = tk.Button(win, text='5', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn5)
btn5.place(height=100, width=100, x=100, y=300)
btn6 = tk.Button(win, text='6', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn6)
btn6.place(height=100, width=100, x=200, y=300)
btn7 = tk.Button(win, text='7', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn7)
btn7.place(height=100, width=100, x=0, y=200)
btn8 = tk.Button(win, text='8', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn8)
btn8.place(height=100, width=100, x=100, y=200)
btn9 = tk.Button(win, text='9', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btn9)
btn9.place(height=100, width=100, x=200, y=200)
btnclear = tk.Button(win, text='C', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btnclear)
btnclear.place(height=100, width=200, x=0, y=100)
btnback = tk.Button(win, text=u'\u232B', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btnback)
btnback.place(height=100, width=100, x=200, y=100)
btnEqual = tk.Button(win, text='=', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btnEqual)
btnEqual.place(height=100, width=100, x=300, y=500)
btndot = tk.Button(win, text='.', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btndot)
btndot.place(height=100, width=100, x=200, y=500)
btndivide = tk.Button(win, text='÷', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btndivide)
btndivide.place(height=100, width=100, x=300, y=400)
btnmulti = tk.Button(win, text='x', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btnmulti)
btnmulti.place(height=100, width=100, x=300, y=300)
btnadd = tk.Button(win, text='+', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btnadd)
btnadd.place(height=100, width=100, x=300, y=200)
btnminus = tk.Button(win, text='-', bg='#2F4F4F',
fg='#FFD700', font=5, command=my_btnminus)
btnminus.place(height=100, width=100, x=300, y=100)
win.mainloop()
|
yazdanghasemi/Tools_For_Restuarant
|
Calculator_version1.py
|
Calculator_version1.py
|
py
| 4,950 |
python
|
en
|
code
| 0 |
github-code
|
50
|
33168831099
|
import RPi.GPIO as GPIO
leftWheelPins = ((11, 13, 15), (12, 16, 18))
rightWheelPins = ((33, 35, 37), (36, 38, 40))
def setupWheels():
for pins in leftWheelPins + rightWheelPins:
GPIO.setup(pins[0], GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(pins[1], GPIO.OUT)
GPIO.setup(pins[2], GPIO.OUT)
def enablePins(pins):
for pin_set in pins:
GPIO.output(pin_set[0], GPIO.HIGH)
def disablePins(pins):
for pin_set in pins:
GPIO.output(pin_set[0], GPIO.LOW)
def rightWheels(direction):
enablePins(rightWheelPins)
if direction == "forward":
GPIO.output(rightWheelPins[0][1], GPIO.HIGH)
GPIO.output(rightWheelPins[0][2], GPIO.LOW)
GPIO.output(rightWheelPins[1][1], GPIO.LOW)
GPIO.output(rightWheelPins[1][2], GPIO.HIGH)
elif direction == "backward":
GPIO.output(rightWheelPins[0][1], GPIO.LOW)
GPIO.output(rightWheelPins[0][2], GPIO.HIGH)
GPIO.output(rightWheelPins[1][1], GPIO.HIGH)
GPIO.output(rightWheelPins[1][2], GPIO.LOW)
else:
disablePins(rightWheelPins)
def leftWheels(direction):
enablePins(leftWheelPins)
if direction == "forward":
GPIO.output(leftWheelPins[0][1], GPIO.LOW)
GPIO.output(leftWheelPins[0][2], GPIO.HIGH)
GPIO.output(leftWheelPins[1][1], GPIO.LOW)
GPIO.output(leftWheelPins[1][2], GPIO.HIGH)
elif direction == "backward":
GPIO.output(leftWheelPins[0][1], GPIO.HIGH)
GPIO.output(leftWheelPins[0][2], GPIO.LOW)
GPIO.output(leftWheelPins[1][1], GPIO.HIGH)
GPIO.output(leftWheelPins[1][2], GPIO.LOW)
else:
disablePins(leftWheelPins)
def forward(wheels):
if wheels == "right-wheels":
rightWheels("forward")
elif wheels == "left-wheels":
leftWheels("forward")
else:
rightWheels("forward")
leftWheels("forward")
def backward(wheels):
if wheels == "right-wheels":
rightWheels("backward")
elif wheels == "left-wheels":
leftWheels("backward")
else:
rightWheels("backward")
leftWheels("backward")
def stop(wheels):
if wheels == "right-wheels":
rightWheels("stop")
elif wheels == "left-wheels":
leftWheels("stop")
else:
rightWheels("stop")
leftWheels("stop")
def turn(direction):
if direction == "right":
forward("left-wheels")
backward("right-wheels")
else:
forward("right-wheels")
backward("left-wheels")
|
Sohan-Dillikar/Raspberry_Pi_Bluetooth_RC_Car
|
Main_Code/wheels.py
|
wheels.py
|
py
| 2,511 |
python
|
en
|
code
| 0 |
github-code
|
50
|
46965044858
|
import requests
import json
class Networks:
polka = ["polkadot", 10]
kusama = ["kusama", 12]
westend = ['westend', 12]
address = "13mAjFVjFDpfa42k2dLdSnUyrSzK8vAySsoudnxX2EKVtfaq"
current_network = Networks.polka
url = "https://api.subquery.network/sq/ef1rspb/fearless-wallet"
headers = {'Content-Type': 'application/json'}
with open('./history_query.json') as f:
history_elements_query = json.load(f)
data = json.dumps(history_elements_query)
subquery_req = requests.request("POST", url, headers=headers,
data=data)
history = json.loads(subquery_req.text)
subscan_url = "https://{}.api.subscan.io/api/scan/extrinsics".format(current_network[0])
subscan_data = '{"address": "%s","row": 100,"page": 0}' % (address)
subscan_req = requests.request("POST", subscan_url, headers=headers,
data=subscan_data)
result = subscan_req.json()
print(result)
|
novasamatech/substrate-history-comparer
|
old/history_elements_calc.py
|
history_elements_calc.py
|
py
| 938 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8971051726
|
from tkinter import *
from quiz_brain import QuizBrain
THEME_COLOR = "#375362"
TEXT_FONT = ("Arial", 20, "italic")
class QuizInterface:
def __init__(self, quiz_brain: QuizBrain):
self.quiz = quiz_brain
self.window = Tk()
self.window.title("Quizzler")
self.window.config(padx=20, pady=20, bg=THEME_COLOR)
self.score_label = Label(text="Score: 0", bg=THEME_COLOR, fg="white")
self.score_label.grid(row=0, column=1)
self.canvas = Canvas(width=300, height=250, bg="white")
self.question_text = self.canvas.create_text(150, 125,
width=280,
text="Some text here",
fill=THEME_COLOR,
font=TEXT_FONT)
self.canvas.grid(row=1, column=0, columnspan=2, pady=50)
        # keep references on self so tkinter's images are not garbage-collected
        self.image_true = PhotoImage(file="images/true.png")
        self.true_button = Button(image=self.image_true, highlightthickness=0, command=self.click_true)
        self.true_button.grid(row=2, column=0)
        self.image_false = PhotoImage(file="images/false.png")
        self.false_button = Button(image=self.image_false, highlightthickness=0, command=self.click_false)
        self.false_button.grid(row=2, column=1)
self.get_next_question()
self.window.mainloop()
def get_next_question(self):
self.canvas.config(bg="white")
if self.quiz.still_has_questions():
self.score_label.config(text=f"Score: {self.quiz.score}")
q_text = self.quiz.next_question()
self.canvas.itemconfig(self.question_text, text=q_text)
else:
self.canvas.itemconfig(self.question_text, text="You've reached the end of the quiz")
self.true_button.config(state="disabled")
self.false_button.config(state="disabled")
def click_true(self):
self.give_feedback(self.quiz.check_answer("True"))
def click_false(self):
self.give_feedback(self.quiz.check_answer("False"))
def give_feedback(self, is_right):
if is_right:
self.canvas.config(bg="green")
else:
self.canvas.config(bg="red")
self.window.after(1000, self.get_next_question)
|
angelov-g/100-days-of-code
|
intermediate-plus/gui-quiz/ui.py
|
ui.py
|
py
| 2,334 |
python
|
en
|
code
| 2 |
github-code
|
50
|
32497366630
|
import sys
import ctypes
from datetime import datetime
from os import makedirs, remove
from os.path import basename, splitext, join, exists
from numpy import concatenate, copy
from numpy.lib.stride_tricks import as_strided
import spacepy
from spacepy import pycdf
TIME_VARIABLE = 'Epoch'
VARIABLES = ['BY_GSM', 'BZ_GSM', 'flow_speed', 'Vx', 'Vy', 'Vz']
REQUIRED_SAMPLING = 60000
CDF_EPOCH = pycdf.const.CDF_EPOCH.value
CDF_DOUBLE = pycdf.const.CDF_DOUBLE.value
CDF_UINT1 = pycdf.const.CDF_UINT1.value
GZIP_COMPRESSION = pycdf.const.GZIP_COMPRESSION
GZIP_COMPRESSION_LEVEL1 = ctypes.c_long(1)
GZIP_COMPRESSION_LEVEL9 = ctypes.c_long(9)
CDF_CREATOR = "EOX:average_omni_hr_1min.py [%s-%s, libcdf-%s]" % (
spacepy.__name__, spacepy.__version__,
"%s.%s.%s-%s" % tuple(
v if isinstance(v, int) else v.decode('ascii')
for v in pycdf.lib.version
)
)
METADATA = {
'Epoch': {
'type': CDF_EPOCH,
'attributes': {
"DESCRIPTION": "Epoch time",
"UNITS": "-",
},
},
"BY_GSM": {
"type": CDF_DOUBLE,
"nodata": 9999.99,
"attributes": {
"DESCRIPTION": "1AU IP By (nT), GSM",
"UNITS": "nT"
}
},
"BZ_GSM": {
"type": CDF_DOUBLE,
"nodata": 9999.99,
"attributes": {
"DESCRIPTION": "1AU IP Bz (nT), GSM",
"UNITS": "nT"
}
},
"Vx": {
"type": CDF_DOUBLE,
"nodata": 99999.9,
"attributes": {
"DESCRIPTION": "Vx Velocity, GSE",
"UNITS": "km/s"
}
},
"Vy": {
"type": CDF_DOUBLE,
"nodata": 99999.9,
"attributes": {
"DESCRIPTION": "Vy Velocity, GSE",
"UNITS": "km/s"
}
},
"Vz": {
"type": CDF_DOUBLE,
"nodata": 99999.9,
"attributes": {
"DESCRIPTION": "Vz Velocity, GSE",
"UNITS": "km/s"
}
},
"flow_speed": {
"type": CDF_DOUBLE,
"nodata": 99999.9,
"attributes": {
"DESCRIPTION": "flow speed, GSE",
"UNITS": "km/s"
}
}
}
METADATA.update({
"Count_" + variable: {
"type": CDF_UINT1,
"attributes": {
"DESCRIPTION": "Averaging window number of samples of %s" % variable,
"UNITS": "-"
}
}
for variable in VARIABLES
})
class CommandError(Exception):
""" Command error exception. """
def usage(exename, file=sys.stderr):
""" Print usage. """
print("USAGE: %s <output-dir> [<input-file-list>]" % basename(exename), file=file)
print("\n".join([
"DESCRIPTION:",
" Perform the delayed 20min averaging of the OMNI 1min data. ",
" (20min window box-car filter with 10min delay) ",
" The input files are passed either from the standard input (default) ",
" or via file. The output files are written in the given output "
" directory",
]), file=file)
def parse_inputs(argv):
""" Parse input arguments. """
argv = argv + ['-']
try:
output_dir = argv[1]
input_files = argv[2]
except IndexError:
raise CommandError("Not enough input arguments!")
return output_dir, input_files
def main(output_dir, input_files):
""" Main function. """
def _get_output_filename(filename, suffix):
base, ext = splitext(basename(filename))
return join(output_dir, "%s%s%s" % (base, suffix, ext))
makedirs(output_dir, exist_ok=True)
file_list = sys.stdin if input_files == "-" else open(input_files)
with file_list:
previous = None
for input_ in (line.strip() for line in file_list):
output = _get_output_filename(input_, "_avg20min_delay10min")
print("%s -> %s" % (input_, output))
process_file(output, input_, previous)
previous = input_
def process_file(filename_out, filename_in, filename_in_previous=None):
""" Process single file. """
sources = [basename(filename_in)]
time_in, data_in = read_data(filename_in)
if filename_in_previous:
time_prev, data_prev = read_data(filename_in_previous, slice(-20, None))
time_in, data_in = merge_data((time_prev, time_in), (data_prev, data_in))
sources = [basename(filename_in_previous)] + sources
check_timeline(time_in)
time_out, data_out = process_data(time_in, data_in)
write_data(filename_out, time_out, data_out, {
"TITLE": "OMNI HR 1min, 20min window average with 10min delay",
"SOURCES": sources,
})
def process_data(time, data):
""" Perform the actual averaging. """
result = {}
for variable in VARIABLES:
input_ = data[variable]
nodata = METADATA[variable]['nodata']
output, counts = boxcar(input_, input_ != nodata, 20)
result[variable] = output
result["Count_" + variable] = counts
return time[20:], result
def boxcar(data, mask, size):
""" Boxcar filter. """
def _reshape(array):
return as_strided(
array,
shape=(array.size - size, size + 1),
strides=(array.itemsize, array.itemsize),
writeable=False
)
data = copy(data)
data[~mask] = 0.0
count = _reshape(mask).sum(axis=1)
average = _reshape(data).sum(axis=1) / count
return average, count
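# Minimal sketch of the windowing: with size=2, _reshape turns an array of
# length 5 into the 3 overlapping windows [d0,d1,d2], [d1,d2,d3], [d2,d3,d4],
# so boxcar(np.arange(1.0, 6.0), np.ones(5, dtype=bool), 2)
# returns (array([2., 3., 4.]), array([3, 3, 3])).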
def write_data(filename, time, data, extra_attrs=None):
""" Write data to the output file. """
with cdf_open(filename, "w") as cdf:
_write_global_attrs(cdf, extra_attrs)
_set_variable(cdf, TIME_VARIABLE, time)
for variable in data:
_set_variable(cdf, variable, data[variable])
def _set_variable(cdf, variable, data):
meta = METADATA[variable]
cdf.new(
variable, data, meta['type'], dims=data.shape[1:],
compress=GZIP_COMPRESSION, compress_param=GZIP_COMPRESSION_LEVEL1,
)
cdf[variable].attrs.update(meta['attributes'])
def _write_global_attrs(cdf, extra_attrs=None):
cdf.attrs.update({
"CREATOR": CDF_CREATOR,
"CREATED": (
datetime.utcnow().replace(microsecond=0)
).isoformat() + "Z",
})
cdf.attrs.update(extra_attrs or {})
def read_data(filename, array_slice=Ellipsis):
""" Read the input data. """
with cdf_open(filename) as cdf:
return cdf.raw_var(TIME_VARIABLE)[array_slice], {
variable: cdf.raw_var(variable)[array_slice]
for variable in VARIABLES
}
def check_timeline(time):
""" Check regular data sampling. """
dtime = time[1:] - time[:-1]
if (dtime != REQUIRED_SAMPLING).any():
print("sampling:", dtime.min(), dtime.max())
raise ValueError("Irregular sampling detected!")
def merge_data(time, data):
""" Merge input data arrays. """
return concatenate(time), {
variable: concatenate([item[variable] for item in data])
for variable in VARIABLES
}
def cdf_open(filename, mode="r"):
""" Open a new or existing CDF file.
Allowed modes are 'r' (read-only) and 'w' (read-write).
A new CDF file is created if the 'w' mode is chosen and the file does not
exist.
The returned object is a context manager which can be used with the `with`
command.
NOTE: for the newly created CDF files the pycdf.CDF adds the '.cdf'
extension to the filename if it does not end by this extension already.
"""
if mode == "r":
cdf = pycdf.CDF(filename)
elif mode == "w":
if exists(filename):
remove(filename)
pycdf.lib.set_backward(False) # produce CDF version 3
cdf = pycdf.CDF(filename, "")
else:
raise ValueError("Invalid mode value %r!" % mode)
return cdf
if __name__ == "__main__":
if not sys.argv[1:]:
usage(sys.argv[0], file=sys.stderr)
else:
try:
sys.exit(main(*parse_inputs(sys.argv)))
except CommandError as error:
print("ERROR: %s" % error, file=sys.stderr)
usage(sys.argv[0], file=sys.stderr)
|
ESA-VirES/VirES
|
preprocessing/average_omni_hr_1min.py
|
average_omni_hr_1min.py
|
py
| 8,107 |
python
|
en
|
code
| 2 |
github-code
|
50
|
30243598632
|
# using size to predict price
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
import pandas as pd
from sklearn.metrics import mean_squared_error
def myCode():
data = pd.read_csv("./houses.csv")
# Needs to reshape to be a 2D array with values.reshape(-1,1)
x = data.iloc[:, 0].values.reshape(-1,1)
print(x)
y = data.iloc[:, 2].values.reshape(-1,1)
print(y)
# Fit model
model = linear_model.LinearRegression()
model.fit(x,y)
    # Compare the true prices with the predictions; passing x here was a bug.
    # For the MAE instead, use np.mean(abs(model.predict(x) - y)).
    print(mean_squared_error(y, model.predict(x)))
# Create random values to predict on. You can also just do this with the normal data.
x_plot = np.arange(0, 968)
x_plot = x_plot.reshape(-1, 1)
# Predict on those points,
# print(model.predict(x_plot))
y_predicted = model.predict(x_plot)
# Plot input, then output
xIn = pd.Series(280).to_frame()
yOut = model.predict(xIn)
print(f"Price on 280: {yOut}")
plt.scatter(xIn, yOut, color='k')
# Scatter x and y
plt.scatter(x, y)
# Then plot the predicted line based of random numbers put into the model.
plt.plot(x_plot, y_predicted, color='red')
plt.show()
"""
def hisCode():
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
data = pd.read_csv("c:/data/houses.csv", header=None)
data.columns = ['living_space', 'size_of_property', 'price']
degree = 5
model = np.poly1d(np.polyfit(data.living_space, data.price, degree))
predicted_prices = model(data.living_space)
print_data = np.linspace(0, np.max(data.living_space), 100)
plt.figure()
plt.scatter(data.living_space, data.price)
plt.plot(print_data, model(print_data), c="black")
plt.ylim(min(data.price) * 0.8, max(data.price) * 1.2)
plt.show()
err = np.mean(np.abs(predicted_prices - data.price))
print(f'The MAE is {err}')
house280 = model(280)
print(f'The prediction for a house of size 280 is {house280}')
"""
if __name__ == "__main__":
myCode()
# hisCode()
|
FinnianHBLR/Datascience-projects
|
week4.py
|
week4.py
|
py
| 2,156 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8441423009
|
'''
Write a program to determine whether an employee is owed any overtime.
You should ask the user how many hours the employee worked this week,
as well as the hourly wage for this employee.
If the employee worked more than 40 hours, you should print a message
which says the employee is due some additional pay, as well as the amount due.
The additional amount owed is 1.5x the employee's hourly wage for each
hour worked over the 40 hours. Double overtime is 2x the standard wage and
applies after 80 hours are worked in a week.
'''
def overtime_calculator():
while True:
try:
hours_worked_this_week = float(input("How many hours did you work this week?: "))
break
except ValueError:
print("Enter a numerical value (5 or 5.0, but not five).")
continue
while True:
try:
hourly_wage = float(input("What is your hourly wage?: "))
break
except ValueError:
print("Enter a numerical value (5 or 5.0, but not five).")
continue
    if hours_worked_this_week > 80:
        standard_earnings = 40 * hourly_wage
        overtime_earnings = 40 * (hourly_wage * 1.5)
        double_overtime_earnings = (hours_worked_this_week - 80) * (hourly_wage * 2)
        total_weekly_earnings = standard_earnings + overtime_earnings + double_overtime_earnings
        print(f"You earned {double_overtime_earnings} in double overtime, plus {overtime_earnings} in overtime earnings, and {standard_earnings} at your standard rate. Your total earnings this week are: {total_weekly_earnings}.")
elif hours_worked_this_week > 40:
standard_earnings = 40 * hourly_wage
overtime = (hours_worked_this_week - 40) * (hourly_wage * 1.5)
total_pay = standard_earnings + overtime
print(f"You earned {overtime} in overtime pay this week. Total pay earned this week is {total_pay}.")
else:
earnings = hours_worked_this_week * hourly_wage
print("This week you earned: $", earnings, ". With no overtime.")
overtime_calculator()
|
sauuyer/python-practice-projects
|
day5-overtime-calculator.py
|
day5-overtime-calculator.py
|
py
| 2,082 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28590685007
|
from arclet.alconna import Alconna, Args, CommandMeta
from arclet.alconna.graia import Match, alcommand
from bce.option import Option
from bce.public.api import balance_chemical_equation
from graia.ariadne.app import Ariadne
from graia.ariadne.message.chain import MessageChain
from graia.ariadne.message.element import Source
from graia.ariadne.model import Group, Member
from graia.ariadne.util.cooldown import CoolDown
from graiax.shortcut.saya import dispatch
from rainaa.perm import Ban, PermissionDispatcher
alc = Alconna(
    "!配平",  # command trigger: "!balance"
    Args["main_args", str],
    meta=CommandMeta(
        "配平化学方程式",  # description: "balance a chemical equation"
        example="!配平 C6H12O6+O2=CO2+H2O",
        fuzzy_match=True,
    ),
)
@alcommand(alc, send_error=True)
@dispatch(CoolDown(1.5))
@dispatch(PermissionDispatcher(Ban("balance", ["本群还没开放"])))  # ban reply: "this group is not open yet"
async def setu(
app: Ariadne,
group: Group,
message: MessageChain,
member: Member,
source: Source,
main_args: Match[str],
):
exp = main_args.result
try:
resp = balance_chemical_equation(exp, Option())
if isinstance(resp, str):
await app.send_group_message(
group,
f"配平完成\n{resp}",
quote=source,
)
except Exception as e:
await app.send_group_message(
group,
f"配平失败\n{str(e)}",
quote=source,
)
|
linyunze/RainAa
|
module/balance.py
|
balance.py
|
py
| 1,426 |
python
|
en
|
code
| 0 |
github-code
|
50
|
21554328390
|
from PyQt5 import QtCore, QtWidgets, QtGui
from collections import deque
from ..const import *
class logViewerWidget:
def __init__(self, parent: QtWidgets.QWidget, name: str, pos: QtCore.QRect):
self.widget = QtWidgets.QTextBrowser(parent)
self.widget.setGeometry(pos)
self.widget.setObjectName(name)
self.log = deque()
self.scrollBar = self.widget.verticalScrollBar()
    def setText(self, text):
        # accept any value; the builtin `any` is not a type, so coerce explicitly
        if not isinstance(text, str):
            text = str(text)
self.log.append(text)
if len(self.log) > LOG_ROW_NUMBER:
self.log.popleft()
self.widget.setText("\n".join(self.log))
self.scrollBar.setValue(self.scrollBar.maximum())
|
s-ktmy/nitfc-openCampus_2020
|
src/Widget/logViewerWidget.py
|
logViewerWidget.py
|
py
| 725 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22058705964
|
#!/bin/python
import sys
from collections import Counter
def makingAnagrams(s1, s2):
c1 = Counter(s1)
c2 = Counter(s2)
for x in set(c1).intersection(set(c2)):
curr = min(c1[x],c2[x])
c1[x] -= curr
c2[x] -= curr
return sum(c1.values())+sum(c2.values())
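# Worked example: makingAnagrams("cde", "abc") returns 4 -- only the shared
# "c" survives, so the four characters d, e, a and b must be deleted.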
# Complete this function
s1 = input().strip()
s2 = input().strip()
result = makingAnagrams(s1, s2)
print(result)
|
thesharpshooter/hackerrank
|
strings/makingAnagrams.py
|
makingAnagrams.py
|
py
| 420 |
python
|
en
|
code
| 0 |
github-code
|
50
|
44098013268
|
from selenium import webdriver
from selenium.webdriver.chrome.service import Service as ChromeService
from webdriver_manager.chrome import ChromeDriverManager
from MainPageCalculator import Calculator
def test_calculator():
browser = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()))
calculator = Calculator(browser)
calculator.input_secund('45')
calculator.click_button_7()
calculator.click_button_plus()
calculator.click_button_8()
calculator.click_button_equally()
calculator.wait_result(46, '15')
assert calculator.test_result_of_sum() == '15'
|
Julia2810/homeworks
|
ДЗ7/Task_2/test_result_on_calculator.py
|
test_result_on_calculator.py
|
py
| 611 |
python
|
en
|
code
| 0 |
github-code
|
50
|
33763552438
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from cwlayers import CWConv2d, CWLinear
class MnistCwConv(nn.Module):
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def __init__(self):
super(MnistCwConv, self).__init__()
self.conv1 = CWConv2d(1, 10, kernel_size=5)
self.conv2 = CWConv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = CWLinear(320, 50)
self.fc2 = CWLinear(50, 10)
self._initialize_weights()
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
|
sytelus/NNExp
|
NNExp/pytorch/mnist/mnist_cwconv.py
|
mnist_cwconv.py
|
py
| 1,342 |
python
|
en
|
code
| 1 |
github-code
|
50
|
4095520800
|
"""
Summation of primes
Problem 10
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
"""
import math
def is_prime(x):
if x%2 == 0 and x!=2:
return False
for i in range(3, int(math.sqrt(x))+1, 2):
if x%i == 0:
return False
return True
sum_of_primes = 2
for i in range(3, 2000000):
if is_prime(i):
print(i)
sum_of_primes += i
print(sum_of_primes)
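# A faster alternative (sketch only, not used above): a Sieve of Eratosthenes
# avoids per-number trial division:
#
#   sieve = bytearray([1]) * 2000000
#   sieve[0] = sieve[1] = 0
#   for i in range(2, int(math.sqrt(2000000)) + 1):
#       if sieve[i]:
#           sieve[i*i::i] = bytearray(len(sieve[i*i::i]))
#   print(sum(i for i, flag in enumerate(sieve) if flag))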
|
reddynt/project-euler-solutions
|
10-summation of primes.py
|
10-summation of primes.py
|
py
| 469 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72825087835
|
import csv
import operator
import pdb
import os
import argparse
def get_args():
parser = argparse.ArgumentParser()
    parser.add_argument('--tsv_file', type=str, required=True, help='Please set a tsv file you want to parse.')
args = parser.parse_args()
return args
def main(filename, root_path):
with open(filename) as papers:
music_tech_papers = csv.DictReader(papers, dialect='excel-tab')
#get items in column
f = music_tech_papers.fieldnames
paths = []
accumulated_table = []
num_paper = 0
structure = []
for entry in music_tech_papers:
# get attributes from table
mother_group = entry[f[0]]
child_group = entry[f[1]]
publication_year = entry[f[2]]
author = entry[f[3]]
title = entry[f[4]]
title_url = entry[f[5]]
abstract= entry[f[6]]
source_code = entry[f[7]]
source_code_url= entry[f[8]]
data_set1 = entry[f[9]]
data_set_url1 = entry[f[10]]
data_set2 = entry[f[11]]
data_set_url2 = entry[f[12]]
data_set3 = entry[f[13]]
data_set_url3 = entry[f[14]]
demo1 = entry[f[15]]
demo_url1 = entry[f[16]]
demo2 = entry[f[17]]
demo_url2 = entry[f[18]]
path = file_destination(mother_group, child_group, root_path)
paths.append(path)
create_md(path, publication_year, title, title_url, author,
abstract, data_set1, data_set_url1, data_set2, data_set_url2, data_set3, data_set_url3,
source_code, source_code_url, demo1, demo_url1, demo2, demo_url2)
structure.append((mother_group, child_group))
# complete_data_table = group_dataset_table(mother_group, child_group,
# data_set1, data_set_url1,
# data_set2, data_set_url2,
# data_set3, data_set_url3,
# accumulated_table)
print('check_{}'.format(filename))
num_paper += 1
paths = list(dict.fromkeys(paths))
# pure_structure = list(dict.fromkeys(structure))
for path in paths:
merge_mds(path)
def group_dataset_table(mother, child,
data_set1, data_set_url1,
data_set2, data_set_url2,
data_set3, data_set_url3,
accumuated_list):
temp = [data_set1, data_set2, data_set3]
content = [i for i in temp if i != '']
if len(content) == 0:
return accumuated_list
    elif len(content) == 1:
        table_line = '|' + mother + '|' + child + '|[' + data_set1 + '](' + data_set_url1 + ')|' \
                     + '|' + '|' + '|' + '|' \
                     + '|' + '|' + '|' + '|'
    elif len(content) == 2:
        table_line = '|' + mother + '|' + child + '|[' + data_set1 + '](' + data_set_url1 + ')|' \
                     + '|[' + data_set2 + '](' + data_set_url2 + ')|' \
                     + '|' + '|' + '|' + '|'
    else:
        table_line = '|' + mother + '|' + child + '|[' + data_set1 + '](' + data_set_url1 + ')|' \
                     + '|[' + data_set2 + '](' + data_set_url2 + ')|' \
                     + '|[' + data_set3 + '](' + data_set_url3 + ')|'
    # list.append returns None, so append first and return the updated list
    accumuated_list.append(table_line)
    return accumuated_list
# '|' + + '|' + + '|' + + '|' + + '|'
def merge_mds(path_to_mds):
md_list_temp = os.listdir(path_to_mds)
md_list = sorted(md_list_temp, reverse=True)
readme_name = "README.md"
with open(path_to_mds+readme_name, "w+") as readme:
for md in md_list:
with open(path_to_mds+md) as temp:
readme.write(temp.read())
if md != "README.md":
os.remove(path_to_mds+md)
readme.close()
return 0
def file_destination(mother_group, child_group,
root_path):
return root_path + mother_group + '/' + child_group + '/'
def create_md(path, publication_year, title, title_url, author,
abstract, data_set1, data_set_url1, data_set2, data_set_url2, data_set3, data_set_url3,
source_code, source_code_url, demo1, demo_url1, demo2, demo_url2):
filename = title + ".md"
data = [data_set1, data_set_url1, data_set2, data_set_url2, data_set3, data_set_url3]
source = [source_code, source_code_url]
demo = [demo1, demo_url1, demo2, demo_url2]
print(path + publication_year + filename)
with open(path+publication_year+filename,"w+") as paper_md:
line_title = "# " + ' ' + '['+ title +']' +'('+ title_url+')' + '\n'
line_author = "**Author**: " + author + '\n' + '\n'
line_year = "**Year**: " + publication_year + '\n'
line_abstract = ">**Abstract**: " + abstract +'\n' + '\n'
line_dataset = "**Data Set**: "
line_sourcecode = "**Source Code**: "
line_demo = "**Demo**: "
for i in range(0,len(data)):
if data[i] == '':
if i == 0:
line_dataset += "Not availabe, "
break
else:
break
elif i % 2 == 0:
line_dataset += '['+ data[i] +']' + '(' + data[i+1] +'), '
line_dataset = line_dataset[:-2] + "\n\n"
# TODO fix here; source code column is redundant.
for i in range(0,len(source)):
if source[i] == '':
if i == 0:
line_sourcecode += "Not availabe, "
break
else:
break
elif i % 2 == 0:
line_sourcecode += '['+ source[i] +']' + '(' + source[i+1] +'), '
line_sourcecode = line_sourcecode[:-2] + "\n\n"
for i in range(0,len(demo)):
if demo[i] == '':
if i == 0:
line_demo += "Not availabe, "
break
else:
break
elif i % 2 == 0:
line_demo += '['+ demo[i] +']' + '(' + demo[i+1] +'), '
line_demo = line_demo[:-2] + "\n\n"
paper_md.write(line_title)
paper_md.write(line_author)
paper_md.write(line_year)
paper_md.write(line_abstract)
paper_md.write(line_dataset)
paper_md.write(line_sourcecode)
paper_md.write(line_demo)
paper_md.close()
return 0
def sort_papers_by_year(filename):
with open(filename, "r+") as mixing:
mixing_tsv = csv.DictReader(mixing, dialect='excel-tab')
f = mixing_tsv.fieldnames
sorted_mixing = sorted(mixing_tsv, key=operator.itemgetter('Publication Year'), reverse=True)
mixing.seek(0)
writer = csv.DictWriter(mixing, delimiter='\t', dialect='excel-tab', fieldnames=f)
writer.writeheader()
for row in sorted_mixing:
writer.writerow(row)
mixing.truncate()
mixing.close()
def create_main_md():
# first block + structure.md + end_block.md
with open("../README.md", "w+") as readme, open("../first_block.md") as first_block_md, \
open("../structure.md") as structure_md, open("../end_block.md") as end_block:
readme.write(first_block_md.read())
readme.write(structure_md.read())
readme.write(end_block.read())
if __name__ == "__main__":
file = get_args()
root_path = '../'
sort_papers_by_year(file.tsv_file)
main(file.tsv_file, root_path)
create_main_md()
|
Hyon0930/MusicTechPapers
|
src/organise_papers.py
|
organise_papers.py
|
py
| 7,714 |
python
|
en
|
code
| 1 |
github-code
|
50
|
40085189624
|
import argparse
from dataclasses import dataclass
import os.path
import re
import sys
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
@dataclass
class Date:
"""
A custom date implementation because sometimes we only know the birthday
but not the year.
"""
day: int
month: int
year: int
def __repr__(self):
out = f"{self.day:02d}.{self.month:02d}"
if self.year is not None:
out += f".{self.year}"
return out
@dataclass
class Person:
name: str
birthdate: Date
google_id: str
google_etag: str
def parse_input(input):
(name, datetext) = input.split(":")
name = name.strip()
datetext = datetext.strip()
    # Parse european date format dd.mm.yyyy (eg: 23.11.2000)
    # Note the year is optional here.
    if re.match(r"\d{2}\.\d{2}(\.\d{4})?", datetext):
        (day, month, _, year) = re.search(
            r"(\d{2})\.(\d{2})(\.(\d{4}))?", datetext
        ).groups()
        date = Date(int(day), int(month), int(year) if year else None)
# Parse ISO format yyyy-mm-dd (eg: 2000-11-23)
elif re.match(r"(\d{4})-(\d{2})-(\d{2})", datetext):
(year, month, day) = re.match(r"(\d{4})-(\d{2})-(\d{2})", datetext).groups()
date = Date(int(day), int(month), int(year))
# Date parsing error
else:
print(
"ERROR: I cannot parse the date.\n"
"I can only read dates in the european or ISO format.\n"
"Examples: 2000-11-23, 23.11.2000, 23.11 (when year is unknonw)",
file=sys.stderr,
)
exit(1)
return Person(name, date, None, None)
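# Worked example (hypothetical input): parse_input("Alice: 23.11.2000") yields
# Person(name="Alice", birthdate=Date(23, 11, 2000), ...), while
# parse_input("Bob: 23.11") leaves the year as None.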
def google_auth():
# If modifying these scopes, delete the file token.json.
SCOPES = ["https://www.googleapis.com/auth/contacts"]
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists("token.json"):
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file("credentials.json", SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open("token.json", "w") as token:
token.write(creds.to_json())
return creds
def parse_google_person(p):
google_id = p["resourceName"]
google_etag = p["etag"]
name = p["names"][0]["displayName"]
date = None
try:
dateinfo = p["birthdays"][0]["date"]
day = dateinfo["day"]
month = dateinfo["month"]
date = Date(day, month, None)
year = dateinfo["year"]
date = Date(day, month, year)
except KeyError:
pass
return Person(name, date, google_id, google_etag)
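# (Editor's note) The try/except above is deliberate two-phase parsing:
# Google may return a birthday without a "year" key, so a year-less Date is
# built first and only upgraded when dateinfo["year"] is present; a KeyError
# at any step simply leaves the last successfully built value in place.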
def find_person(service, target):
req = service.people().searchContacts(
query=target.name, readMask="names,birthdays", pageSize=64
)
results = req.execute()
try:
persons = list(
map(lambda r: parse_google_person(r["person"]), results["results"])
)
except KeyError:
persons = []
if len(persons) == 0:
print("The name didn't match anyone in your contacts.")
exit(0)
elif len(persons) == 1:
person = persons[0]
# If the name is not the same ask if it is the correct person
if target.name.lower() != person.name.lower():
print("Is this the person you meant? (Y/n)")
print(f"{person.name}: {person.birthdate}")
answer = input("> ")
if answer.lower() != "y":
exit(0)
        # If the person already has a birthday, ask if it should be overwritten.
        elif person.birthdate is not None:
print(
"The person already has a birthday, do you want to override it? (Y/n)"
)
print(f"{person.name}: {person.birthdate}")
answer = input("> ")
if answer.lower() != "y":
exit(0)
return person
else:
print("Which of them should be updated:")
for (n, person) in enumerate(persons):
print(f"[{n}] {person.name}: {person.birthdate}")
selection = int(input("> "))
return persons[selection]
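# (Editor's note) Google's People API documentation recommends sending a
# warmup searchContacts request with an empty query before the real search so
# the server-side cache is fresh; the script above skips that step, which can
# make the first lookup after a recent contact edit return stale results.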
def update_person(service, person: Person):
body = {
"etag": person.google_etag,
"birthdays": [
{
"date": {
"day": person.birthdate.day,
"month": person.birthdate.month,
"year": person.birthdate.year,
},
},
],
}
try:
result = (
service.people()
.updateContact(
resourceName=person.google_id,
body=body,
updatePersonFields="birthdays",
)
.execute()
)
except HttpError as e:
print("🔥 Google responded with an error updating the contact:")
print(e)
exit(1)
updated_person = parse_google_person(result)
print(f"🎉 Updated {updated_person.name}: {updated_person.birthdate}")
def main():
# Create an argument parser
parser = argparse.ArgumentParser(
description="Annotate google contacts with birthdays."
)
parser.add_argument("input", type=str)
args = parser.parse_args()
# Parse the person and new birthdate
person = parse_input(args.input)
# Authenticate with the google API
creds = google_auth()
service = build("people", "v1", credentials=creds)
try:
# Find the requested person in the contacts and store the google id
found_person = find_person(service, person)
person.name = found_person.name
person.google_id = found_person.google_id
person.google_etag = found_person.google_etag
# Update the birthday
update_person(service, person)
except (KeyboardInterrupt, EOFError):
pass
if __name__ == "__main__":
main()
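# Example invocation (editor's addition; assumes a credentials.json from the
# Google Cloud console sits next to the script):
#   python bdmaker.py "Ada Lovelace: 23.11.2000"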
|
flofriday/scripts
|
bdmaker/bdmaker.py
|
bdmaker.py
|
py
| 6,472 |
python
|
en
|
code
| 0 |
github-code
|
50
|
30723639812
|
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from research_models import *
from pandas import read_excel
import numpy
engine = create_engine('sqlite:///research.db')
Session = sessionmaker(bind=engine)
session = Session()
def load_funding_resource(input_file, sheet_name=None):
""" Read funding resources from the input file and add to the db if not exist.
:param input_file:
:param sheet_name:
:return None:
"""
df = read_excel(input_file, sheet_name=sheet_name)
for source_name in df[df.columns[1]]:
src_ = session.query(FundingSource).filter(
FundingSource.source == source_name).first()
if src_ is None:
funding_source = FundingSource(source=source_name)
session.add(funding_source)
session.commit()
'''
staff = Staff(
#staff_fname = row['first name'],
#staff_lname = row['last name'],
staff_email = row['all main researcher email']
)
department = Department(
department_name=row['all department']
)
'''
#session.add(staff)
#session.add(department)
def load_funding_agency(input_file, sheet_name=None):
df = read_excel(input_file, sheet_name=sheet_name)
for agency_name in df[df.columns[2]]:
ag = session.query(FundingAgency).filter(
FundingAgency.name == agency_name).first()
if ag is None:
agency = FundingAgency(name=agency_name)
session.add(agency)
session.commit()
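# Editor's sketch (not in the original file): both loaders above implement
# the same query-then-insert pattern; assuming the module-level session, a
# shared "get or create" helper could replace them:
#
#   def get_or_create(model, **kwargs):
#       instance = session.query(model).filter_by(**kwargs).first()
#       if instance is None:
#           instance = model(**kwargs)
#           session.add(instance)
#           session.commit()
#       return instance
#
#   # e.g. get_or_create(FundingSource, source=source_name)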
def load_research_project(input_file, sheet_name=None):
"""
Load project information into the db.
:param input_file:
:param sheet_name:
:return None:
"""
df = read_excel(input_file, sheet_name=sheet_name)
for idx, project in df[[df.columns[4], df.columns[5]]].iterrows():
th_name, en_name = project
en_name = en_name.strip() if not isinstance(en_name, float) else None
th_name = th_name.strip() if not isinstance(th_name, float) else None
if not th_name: # None or empty string
th_name = en_name
if th_name and en_name:
p = ResearchProject(title_th=th_name, title_en=en_name)
session.add(p)
session.commit()
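# (Editor's note) The isinstance(..., float) checks above are how the loop
# detects empty cells: read_excel represents missing strings as float('nan'),
# so any float value in these title columns means the cell was blank.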
def load_researcher(input_file, sheet_name=None):
"""
:param input_file:
:param sheet_name:
:return:
"""
    # Use the function's arguments instead of a hard-coded filename.
    research_df = read_excel(input_file, sheet_name=sheet_name)
for ix,row in research_df.iterrows():
research = Research(
research_title_th = row['research title thai'],
research_title_en = row['research title eng'],
# research_field = row['research_field'],
# research_budget_thisyear = row['research_budget_thisyear'],
est_funding = row['amount fund'],
research_startdate = row['start date'],
research_enddate = row['end date'],
# duration = row['duration'],
research_contract = row['research contract']
)
session.add(research)
session.commit()
#st = session.query(Staff).filter(Staff.staff_email=='napat.son').first()
#st = session.query(Staff).filter(Staff.staff_email==row['staff_email']).first()
#st.staff_id
#int(datetime.strftime(row['research_startdate'], '%Y%m%d'))
if __name__ == '__main__':
# load_funding_resource('samplefunding.xlsx', 'funding')
# load_funding_agency('samplefunding.xlsx', 'funding')
load_research_project('samplefunding.xlsx', 'funding')
|
likit/sandbox
|
load_data.py
|
load_data.py
|
py
| 3,544 |
python
|
en
|
code
| 0 |
github-code
|
50
|